Setup

library(repro)
# Load packages declared in the YAML header
automate_load_packages()
# Include external scripts
automate_load_scripts()

# Load data (stringsAsFactors = TRUE so grouping variables arrive as factors)
intern   <- automate_load_data(intern, read.csv, stringsAsFactors = TRUE)
medic    <- automate_load_data(medic, read.csv, stringsAsFactors = TRUE)
PAV      <- automate_load_data(PAV, read.csv, stringsAsFactors = TRUE)
INST     <- automate_load_data(INST, read.csv, stringsAsFactors = TRUE)
PIT      <- automate_load_data(PIT, read.csv, stringsAsFactors = TRUE)
HED      <- automate_load_data(HED, read.csv, stringsAsFactors = TRUE)
HED_fMRI <- automate_load_data(HED_fMRI, read.csv, stringsAsFactors = TRUE)

# Record session info; silence warnings/messages in every knitr chunk
x <- session_info()
opts_chunk$set(warning = FALSE, message = FALSE)


## we recommend running this in a fresh R session or restarting your current session
#install.packages("cmdstanr", repos = c("https://mc-stan.org/r-packages/", getOption("repos")))
#install_cmdstan()


# check_git(); check_make(); check_docker() #check if installed

sessio = session_info(); #opts_chunk$set(echo = F, message=F, warning=F) # set echo F for all

#May I suggest running `repro::automate()`? 

#This will create a `Dockerfile` & `Makefile` based on every RMarkdown in this folder and the special yamls in them. date: "`r format(Sys.time(), '%d %B, %Y')`" 

#add ENV DEBIAN_FRONTEND=noninteractive to DOCKERFILE

This file was automatically created via the Repro package (version 0.1.0) using R version 4.0.4 (2021-02-15)

# MCMC settings: iterations, warmup, chains, cores, and number of simulations.
# NOTE(review): chains and BF might be really unstable with fewer than 20'000
# iterations (4 x 5000); the values below are for quick checks only.
niter  <- 500
warm   <- 100
chains <- 4
cores  <- 4      # or parallel::detectCores() / 2
nsim   <- 10000

# Global options: no scientific notation, suppress warnings (warn = -1 silences
# them globally -- deliberate here), sum contrasts, parallel cores for Stan.
options(scipen = 666, warn = -1,
        contrasts = c("contr.sum", "contr.poly"),
        mc.cores = cores)

set.seed(666)  # fixed random seed for reproducibility

# "Better" lmer optimizer (optimx + nlminb)
control <- lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb'))
# emm_options(pbkrtest.limit = 8000) # increase repetitions limit for frequentist stats

source('R/plots.R', echo = FALSE)  # plot specification
source('R/utils.R', echo = FALSE)  # useful functions

panderOptions('knitr.auto.asis', FALSE)  # remove automatic styling

labels <- c("-1" = "Pre", "1" = "Post")  # facet labels for plots

# Look at R/clean.R (listed in the YAML) which does all the preprocessing for more info

Description

TODO blabla

Demographics

# Demographics: group-comparison table (mean/SD or N/% plus tests), styled for HTML/LaTeX
egltable(c("BMI", "AGE", "GENDER"),
         g = "INTERVENTION",
         data = df,
         strict = FALSE) %>%
  kbl(digits = 2, caption = "Summary statistics") %>%
  kable_styling(full_width = F, position = "center",
                latex_options = "HOLD_position") %>%
  row_spec(0, align = 'c', bold = T)
Summary statistics
Placebo M (SD)/N (%) Liraglutide M (SD)/N (%) Test
BMI 35.08 (2.96) 35.74 (2.93) t(df=47) = -0.78, p = .438, d = 0.22
AGE 40.27 (13.74) 38.61 (11.72) t(df=47) = 0.45, p = .653, d = 0.13
GENDER Chi-square = 0.35, df = 1, p = .556, Phi = 0.08
Men 10 (38.5) 7 (30.4)
Women 16 (61.5) 16 (69.6)

Biomedical data

Variable Selection

# Boxplot of biomedical variables per group
# Boxplot of each (scaled) biomedical variable per intervention group
ggplot(med, aes(INTERVENTION, n)) +
  geom_boxplot() +
  facet_wrap(~feature, scales = "free") +
  theme_minimal() +
  theme(axis.title = element_text()) +
  labs(title = "",
       x = '',
       y = "Biomedical predictor's value (scaled)")
Box-plot of all biomedical predictors per intervention.

Box-plot of all biomedical predictors per intervention.

# Plot correlogram of numeric variables
#pairs(~., data = df[,8:19], main = "Scatterplot Matrix of variables")
#corrplot(cor(df[,8:19], use="pairwise.complete.obs"), type="lower")


Recursive Feature Eliminations

#1) with Recursive Feature Eliminations (CARET)

# Candidate subset sizes: one per predictor column (first column is the outcome).
# seq_along() is safe if the predictor set is ever empty (1:length(x) is not).
sizes <- seq_along(dfmed[-1])
len <- length(sizes)

# Reproducible seeds: one seed vector per resampling iteration ...
seeds <- vector(mode = "list", length = len)
for (i in seq_len(len - 1)) seeds[[i]] <- sample.int(n = nsim, size = length(sizes) + 1)
# ... and a single seed for the last (final) model
seeds[[len]] <- sample.int(nsim, 1)

# Control options: random-forest functions, 10-fold cross-validation
RFEcontrol <- rfeControl(functions = rfFuncs, method = "cv", number = 10, seeds = seeds)

# Run recursive feature elimination and show the retained predictors
rfeResults <- rfe(x = dfmed[-1], y = dfmed$intervention, sizes = sizes, rfeControl = RFEcontrol)
predictors(rfeResults)
## [1] "BMI_diff"    "BW_diff"     "reelin_diff" "GLP_diff"
plot(rfeResults, type = c("g", "o")) # look for the "elbow"

#if we agree that BMI, Body Weight and Waist Circumference actually measure the same thing, there are only 2 other variables that are "useful" to separate the two groups :
#Reelin and GLP

Mediation analysis

#parallel:::setDefaultClusterOptions(setup_strategy = "sequential")

# Causal mediation: does the intervention effect on weight loss run through
# GLP and/or reelin change? Interventional effects, nsim Monte Carlo draws.
med_res <- intmed::mediate(y = "weightLoss", med = c("GLP_diff" ,  "reelin_diff"),  treat = "intervention", ymodel = "regression", mmodel = c("regression", "regression"), treat_lv = 1, control_lv = 0, incint = TRUE, inc_mmint = FALSE, conf.level = 0.95, data = df , sim = nsim, complete_analysis = TRUE, digits = 3,  summary_report=F) #c = c("age","gender"),

# Read the effects table back from the HTML report written by intmed.
# NOTE(review): assumes "res.html" exists in the working directory -- confirm.
table <- list.clean(readHTMLTable("res.html"), fun = is.null, recursive = FALSE); table = table[3]$`NULL`[1:6,]
#pander(table)
# Bold p-values < .05 via inline HTML spans (rendered later by kbl with escape = F)
colnames(table)[4] = "p"; table$p = as.numeric(table$p); table$p = ifelse(as.numeric(table$p) < 0.05,paste("<span style=\" font-weight: bold; \" >" ,sprintf("%.3f",table$p), "</span>"),  paste("<span>" ,sprintf("%.3f",table$p), "</span>")) # highlight p < 0.05
# Display "0.000" as "< 0.001"
table$p = ifelse(table$p == '<span style=" font-weight: bold; " > 0.000 </span>', "<span style=\" font-weight: bold;    \" >\u003C 0.001</span>", table$p)


# Render the mediation effects table (rows 3-4 omitted), keeping inline HTML styling
table[-(3:4), ] %>%
  kbl(escape = F,
      caption = "Mediation Analysis: DV = Weight loss, IV = Intervention") %>%
  kable_styling(full_width = F, position = "center",
                latex_options = "HOLD_position")
Mediation Analysis: DV = Weight loss, IV = Intervention
Estimates 95% CI p
1 Indirect effect mediated through GLP_diff 0.017 (-0.278, 0.327) 0.914
2 Indirect effect mediated through reelin_diff 0.048 (-0.105, 0.248) 0.543
5 Direct effect 1.425*** (0.968, 1.880) < 0.001
6 Total effect 1.489*** (1.023, 1.988) < 0.001
# Just for plot purposes (psych's mediate is not the exact same analysis)
medi <- psych::mediate(weightLoss ~ intervention + (GLP_diff) + (reelin_diff),
                       data = df, n.iter = nsim, plot = F)
psych::mediate.diagram(medi, show.c = FALSE)

Weight Loss

# Model
# Weight loss predicted by intervention plus covariates, with a random
# intercept per participant.
mf = formula(weightLoss ~ intervention + gender + age + GLP_diff + reelin_diff  + (1|id))

# -------------------------------------- Bayesian Regression Models using Stan -------------------------------------
# STAN is a probabilistic programming language that allows you to get full Bayesian statistical inference with MCMC sampling.
# NOTE(review): with warmup = 100, cmdstan warns that the three adaptation
# stages are truncated (see chain output below) -- confirm niter/warm are
# large enough for stable estimates and Bayes factors before reporting.
bmod_full = brm(mf, data=df, family = gaussian, prior = c(prior(normal(0, 3), class = "b", coef = ""), prior(normal(0, 100), class = "Intercept", coef = "")), sample_prior = T, save_pars = save_pars(all = TRUE), chains = chains,  iter = niter, warmup = warm, seed = 123, inits = 1, backend = 'cmdstanr', threads = threading(4)) # a lot to unwind here..  1) Generic informative prior around 0 for fixed effects and weak prior for the intercept 2) we need to sample priors and save parameters for computing BF  
## Running MCMC with 4 parallel chains, with 4 thread(s) per chain...
## 
## Chain 1 WARNING: There aren't enough warmup iterations to fit the 
## Chain 1          three stages of adaptation as currently configured. 
## Chain 1          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 1          the given number of warmup iterations: 
## Chain 1            init_buffer = 15 
## Chain 1            adapt_window = 75 
## Chain 1            term_buffer = 10 
## Chain 1 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 1 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 1 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 1 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 1 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 2 WARNING: There aren't enough warmup iterations to fit the 
## Chain 2          three stages of adaptation as currently configured. 
## Chain 2          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 2          the given number of warmup iterations: 
## Chain 2            init_buffer = 15 
## Chain 2            adapt_window = 75 
## Chain 2            term_buffer = 10 
## Chain 2 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 2 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 2 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 2 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 2 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 2 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 3 WARNING: There aren't enough warmup iterations to fit the 
## Chain 3          three stages of adaptation as currently configured. 
## Chain 3          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 3          the given number of warmup iterations: 
## Chain 3            init_buffer = 15 
## Chain 3            adapt_window = 75 
## Chain 3            term_buffer = 10 
## Chain 3 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 3 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 3 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 3 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 3 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 4 WARNING: There aren't enough warmup iterations to fit the 
## Chain 4          three stages of adaptation as currently configured. 
## Chain 4          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 4          the given number of warmup iterations: 
## Chain 4            init_buffer = 15 
## Chain 4            adapt_window = 75 
## Chain 4            term_buffer = 10 
## Chain 4 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 4 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 4 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 4 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 1 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 1 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 1 finished in 0.3 seconds.
## Chain 2 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 3 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 4 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 4 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 2 finished in 0.3 seconds.
## Chain 3 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 3 finished in 0.4 seconds.
## Chain 4 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 4 finished in 0.4 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 0.3 seconds.
## Total execution time: 0.7 seconds.
#problem here!!

# Frequentist counterpart to compare (random intercept dropped -> plain lm)
fmod_full <- lm(update(mf, ~ . - (1|id)), data = df)

## plot population-level effects posterior distributions and chain sampling
param <- mcmc_plot(object = bmod_full, pars = c("b_.*en", "b_.*ge"), type = "areas")

trace <- mcmc_plot(object = bmod_full, pars = c("b_.*en", "b_.*ge"), type = "trace")

# Check assumptions
# Equality of variance between groups
var_group <- pp_check(bmod_full, type = "stat_grouped", group = "intervention", binwidth = 0.1, nsamples = NULL)

# Posterior predictive fit of the response
rep_fit <- pp_check(bmod_full, nsamples = 100)

# Alignment between model and data; look for obvious patterns in the errors
error <- pp_check(bmod_full, type = "error_scatter_avg", nsamples = NULL)

# Normality of errors
# (note: `residuals` shadows stats::residuals() from this point on)
residuals <- residuals(bmod_full)[, 1]
res <- qqnorm(residuals, pch = 1, plot.it = FALSE)

lmx <- plot_model(fmod_full, type = "diag")  # diagnostic plots for the lm fit

# Assemble diagnostic panels
diagWEIGHT <- ggarrange(param, var_group, rep_fit, error, ncol = 2, nrow = 2)

annotate_figure(diagWEIGHT, top = text_grob("Diagnostic Plots", face = "bold", size = 10))

# Posterior summary: median +/- MAD, 90% HDI, p_direction, Bayes factor, Rhat
full_tab <- describe_posterior(bmod_full,
                               estimate = "median", dispersion = TRUE,
                               ci = .9, ci_method = "hdi",
                               bf_prior = bmod_full, diagnostic = "Rhat",
                               test = c("p_direction", "bf"))

# Drop the intercept and prior rows from the summary
full_tab <- filter(full_tab, Parameter %notin% c("b_Intercept", "prior_sigma"))



# -------------------------------------- FREQUENTIST STATS: AOV -----------------------------------------------

# formula = 'weightLoss ~ intervention + gender + age  + Error(id)'
# 
# 
# mdl.weight = aov_car(as.formula(formula), data = df, factorize = FALSE,  anova_table = list(correction = "GG", es = "pes"),type = "II")
# res = nice(mdl.weight, MSE = F); ref_grid(mdl.weight); res
# 
# PES.weight = pes_ci(as.formula(formula), data = df, conf.level = .90, factorize = FALSE, anova.type = "II", epsilon="none") ; 
# mdl.weight.emms = emmeans(mdl.weight, pairwise ~ intervention)


# res$p.value = as.numeric(res$p.value)
# res$p.value = ifelse(res$p.value < 0.05,paste("<span style=\" font-weight: bold; \" >" ,sprintf("%.3f",res$p.value), "</span>"),  paste("<span>" ,sprintf("%.3f",res$p.value), "</span>"))
# 
# res$F = unlist(str_split(gsub("[^0-9.,-]", "", res$F), ","));res$pes = unlist(str_split(gsub("[^0-9.,-]", "", res$pes), ","));
# res$`90% CI` = paste(sprintf("%.3f",PES.weight[,2]), "-", sprintf("%.3f",PES.weight[,3]))
# 
# res$p.value[1]= "<span style=\" font-weight: bold;    \" >\u003C 0.001</span>"
# colnames(res)[3:5] = c( paste("F(", res$df[1], ")", sep=""),"&eta;<sub>p</sub><sup>2</sup>", "p")
# res[c(1,4,6,3,5)]  %>% kbl(digits = 2, escape = F) %>%
#   kable_styling(latex_options = "striped", position = "center", full_width = F) 

# -------------------------------------- Regression table summary --------------------------------------

tab_model(bmod_full, show.p = F, show.intercept = F, show.se = T, title ="", show.re.var = F, digits = 3, dv.labels = "Weight Loss (\u0394 BMI)", file = "tmp/temp1.html", transform = NULL,  rm.terms = "beta")
  Weight Loss (Δ BMI)
Predictors Estimates std. Error CI (95%)
intervention 1.356 0.234 0.918 – 1.804
gender 0.147 0.205 -0.244 – 0.551
age -0.296 0.202 -0.666 – 0.060
GLP diff -0.171 0.279 -0.726 – 0.418
reelin diff -0.051 0.291 -0.639 – 0.517
ICC 0.16
N id 45
Observations 45
Marginal R2 / Conditional R2 0.569 / 0.730
# Plain-language effect descriptions from the posterior (sexit: sequential
# effect existence and significance testing), captured as text
report = capture.output(sexit(bmod_full, ci=.9))
# NOTE(review): the model description below is a hard-coded string -- keep it
# in sync with `mf`, the priors, and the niter/warm/chains settings above.
print(paste("Bayesian linear mixed model (estimated using MCMC sampling with " ,chains ," chains of", niter, " iterations and a warmup of", warm, ") to predict weightLoss with intervention, gender, age, GLP_diff and reelin_diff (formula: weightLoss ~ intervention + gender + age + GLP_diff + reelin_diff). The model included id as random effect (formula: ~1 | id). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 2.50) distributions"))
## [1] "Bayesian linear mixed model (estimated using MCMC sampling with  4  chains of 500  iterations and a warmup of 100 ) to predict weightLoss with intervention, gender, age, GLP_diff and reelin_diff (formula: weightLoss ~ intervention + gender + age + GLP_diff + reelin_diff). The model included id as random effect (formula: ~1 | id). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 2.50) distributions"
full_tab
## Summary of Posterior Distribution
## 
## Parameter    | Median |  MAD |         90% CI |     pd |  Rhat |     BF
## -----------------------------------------------------------------------
## intervention |   1.36 | 0.23 | [ 1.00,  1.75] |   100% | 1.008 | > 1000
## gender       |   0.15 | 0.20 | [-0.20,  0.46] | 77.25% | 1.005 |  0.080
## age          |  -0.30 | 0.20 | [-0.61,  0.00] | 94.50% | 1.012 |  0.241
## GLP_diff     |  -0.17 | 0.28 | [-0.64,  0.28] | 73.44% | 1.007 |  0.120
## reelin_diff  |  -0.05 | 0.29 | [-0.50,  0.43] | 57.63% | 1.004 |  0.096
report[4]
## [1] "- b_intervention (Median = 1.36, 90% CI [1.00, 1.75]) has a 100.00% probability of being positive (> 0), 100.00% of being significant (> 0.09), and 99.94% of being large (> 0.54)"
tables <- list.clean(readHTMLTable("tmp/temp1.html"), fun = is.null, recursive = FALSE)
tables2 = tables[[1]] %>% janitor::row_to_names(row_number = 1)
tables2 <- as.matrix(tables2) %>% as_tibble();


tables2[is.na(tables2)] <- "";  names(tables2) <- NULL; tmp = t(tables2[(length(full_tab$Parameter)+1):(length(full_tab$Parameter)+4),1:2]);
tmp[,4][1] = "R2"; tmp[,4][2] = gsub(".*/","",tmp[,4][2]); colnames(tmp) =  tmp[1,]
pander:: pander(tmp[2,])
## 
## ------------------------------------
##  ICC    N id   Observations    R2   
## ------ ------ -------------- -------
##  0.16    45         45        0.730 
## ------------------------------------

Plot Weight Loss

# Posterior draws of the intervention fixed effect
dfdraws <- bmod_full %>%
    spread_draws(`b_intervention`)

# 90% HDI plot of the intervention effect
HDI_weight <- plotHDI(dfdraws$`b_intervention`, credMass = .90, binSize = 100, Title = "") +
    theme_bw() + html_theme

# Posterior draws of the estimated marginal means per intervention group
dfdraws2 <- bmod_full %>%
     emmeans(~ intervention) %>%
     gather_emmeans_draws()

# Posterior distribution of weight loss per group (slab + point interval)
pp <- dfdraws2 %>%
    ggplot(aes(x = as.factor(intervention), y = .value, fill = as.factor(intervention))) +
    geom_abline(slope = 0, intercept = 0, linetype = "dashed", color = "black") +
    stat_slab(.width = c(0.50, 0.9), position = "dodge", alpha = 0.5) +
    stat_pointinterval(.width = c(0.50, 0.9), position = "dodge") +
    labs(x = "", y = "Weight Loss (\u0394 BMI)", title = "") +
    scale_fill_manual(values = c("-1" = pal[1], "1" = pal[6]), guide = "none") +
    scale_color_manual(values = c("-1" = pal[1], "1" = pal[6]), guide = "none") +
    scale_x_discrete(labels = c("Placebo", "Liraglutide")) +
  #scale_y_continuous(expand = c(0, 0), breaks = c(seq.int(400,550, by = 50)), limits = c(380,575)) +
    theme_bw()

plt_bayes_html <- pp + html_theme
plt_bayes <- pp + averaged_theme

# Combine posterior (A) and HDI (B) panels
figureWL <- ggarrange(plt_bayes_html, HDI_weight,
                      labels = c("A", "B"),
                      ncol = 2)
#figureWL




# Group summary (mean +/- SE) of weight loss per intervention
dfR <- summarySE(df, measurevar = "weightLoss",
                 groupvars = "INTERVENTION")

# Numeric x positions for the two groups: Liraglutide left, Placebo right
dfR$cond <- ifelse(dfR$INTERVENTION == "Liraglutide", -0.25, 0.25)
df$cond <- ifelse(df$INTERVENTION == "Liraglutide", -0.25, 0.25)
# Jittered x per observation plus an id-by-group key
df <- df %>% mutate(condjit = jitter(as.numeric(cond), 0.3),
                    grouping = interaction(id, cond))

# Raincloud-style plot: half violins + jittered points + mean +/- SE crossbars
pp <- ggplot(df, aes(x = cond, y = weightLoss,
                     fill = INTERVENTION, color = INTERVENTION)) +
  geom_hline(yintercept = 0, linetype = "dashed", size = 0.4, alpha = 0.8) +
  # color = NA is set (not mapped inside aes) so the violins have no outline
  geom_flat_violin(scale = "count", trim = FALSE, alpha = .2,
                   aes(fill = INTERVENTION), color = NA) +
  geom_point(aes(x = condjit), alpha = .3) +
  geom_crossbar(data = dfR, aes(y = weightLoss, ymin = weightLoss - se, ymax = weightLoss + se),
                width = 0.2, alpha = 0.1) +
  ylab('Weight loss (\u0394 BMI)') + xlab('') +
  scale_y_continuous(expand = c(0, 0), breaks = c(seq.int(-2.5, 7.5, by = 2.5)), limits = c(-3, 8)) +
  scale_x_continuous(labels = c("Liraglutide", "Placebo"), breaks = c(-.25, .25), limits = c(-.5, .5)) +
  scale_fill_manual(values = c("Liraglutide" = pal[6], "Placebo" = pal[1]), guide = 'none') +
  scale_color_manual(values = c("Liraglutide" = pal[6], "Placebo" = pal[1]), guide = 'none') +
  theme_bw()

plt <- pp + averaged_theme
pp + html_theme
Weight Loss by intervention.

Weight Loss by intervention.



cairo_pdf('figures/Figure_WL_Lira.pdf')
print(plt)
dev.off()
## png 
##   2
cairo_pdf('figures/Figure_WL_Lira_Bayes.pdf')
print(figureWL)
dev.off()
## png 
##   2

Pavlovian Conditioning Task

Latency

Latency = time to detect the target (ms) & condition = CS+ or CS-

# Model
# Latency predicted by the condition x intervention x session interaction plus
# covariates; random slopes for condition*session per participant and a random
# intercept per trial-within-condition.
mf = formula(RT ~ condition*intervention*session + age + gender + BMI_V1  + hungry + GLP_diff + reelin_diff + (condition*session|id) + (1|trialxcondition))


# -------------------------------------- Bayesian Regression Models using Stan -------------------------------------
# STAN is a probabilistic programming language that allows you to get full Bayesian statistical inference with MCMC sampling.
# NOTE(review): `max_treedepth` is passed straight to brm() instead of via
# control = list(max_treedepth = 15) -- confirm the cmdstanr backend picks it
# up. Same warmup-too-short warning as the weight-loss model (see output below).
bmod_full = brm(mf, data=PAV, family = exgaussian, prior = c(prior(normal(0, 3), class = "b", coef = ""), prior(normal(0, 100), class = "Intercept", coef = "")), sample_prior = T, save_pars = save_pars(all = TRUE), chains = chains,  iter = niter, warmup = warm, seed = 123, inits = 1, backend = 'cmdstanr', threads = threading(4), max_treedepth=15) # a lot to unwind here..  1) Generic informative prior around 0 for fixed effects and weak prior for the intercept 2) we need to sample priors and save parameters for computing BF  3) account for the skewness of reaction times #family = "exgaussian"
## Running MCMC with 4 parallel chains, with 4 thread(s) per chain...
## 
## Chain 1 WARNING: There aren't enough warmup iterations to fit the 
## Chain 1          three stages of adaptation as currently configured. 
## Chain 1          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 1          the given number of warmup iterations: 
## Chain 1            init_buffer = 15 
## Chain 1            adapt_window = 75 
## Chain 1            term_buffer = 10 
## Chain 1 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 2 WARNING: There aren't enough warmup iterations to fit the 
## Chain 2          three stages of adaptation as currently configured. 
## Chain 2          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 2          the given number of warmup iterations: 
## Chain 2            init_buffer = 15 
## Chain 2            adapt_window = 75 
## Chain 2            term_buffer = 10 
## Chain 2 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 3 WARNING: There aren't enough warmup iterations to fit the 
## Chain 3          three stages of adaptation as currently configured. 
## Chain 3          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 3          the given number of warmup iterations: 
## Chain 3            init_buffer = 15 
## Chain 3            adapt_window = 75 
## Chain 3            term_buffer = 10 
## Chain 3 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 4 WARNING: There aren't enough warmup iterations to fit the 
## Chain 4          three stages of adaptation as currently configured. 
## Chain 4          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 4          the given number of warmup iterations: 
## Chain 4            init_buffer = 15 
## Chain 4            adapt_window = 75 
## Chain 4            term_buffer = 10 
## Chain 4 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 3 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 3 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 4 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 4 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 1 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 1 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 2 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 2 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 3 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 4 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 1 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 2 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 3 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 1 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 4 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 2 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 3 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 1 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 4 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 2 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 3 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 3 finished in 238.4 seconds.
## Chain 1 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 1 finished in 243.8 seconds.
## Chain 4 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 4 finished in 253.0 seconds.
## Chain 2 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 2 finished in 266.5 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 250.4 seconds.
## Total execution time: 266.7 seconds.
# Frequentist counterpart (lmer with the custom optimizer) to compare
fmod_full <- lmer(mf, data = PAV, REML = FALSE, control = control)

## plot population-level effects posterior distributions and chain sampling
param <- mcmc_plot(object = bmod_full, pars = c("b_.*o", "b_.*y", "b_.*ge", "b_.*diff", "b_.*V1"), type = "areas")

trace <- mcmc_plot(object = bmod_full, pars = c("b_.*o", "b_.*y", "b_.*ge", "b_.*diff", "b_.*V1"), type = "trace")


# Check assumptions
# Equality of variance between groups
var_group <- pp_check(bmod_full, type = "stat_grouped", group = "intervention", binwidth = 1, nsamples = NULL)

# Response fit -> the exgaussian family accounts for RT skewness, nice!
rep_fit <- pp_check(bmod_full, nsamples = 10)

# Alignment between model and data; look for obvious patterns in the errors
error <- pp_check(bmod_full, type = "error_scatter_avg", nsamples = NULL)


# Normality of errors
# (note: `residuals` shadows stats::residuals() from this point on)
residuals <- residuals(bmod_full)[, 1]
res <- qqnorm(residuals, pch = 1, plot.it = FALSE)

lmx <- plot_model(fmod_full, type = "diag")  # diagnostic plots for lmer

# Assemble diagnostic panels
diagRT <- ggarrange(param, var_group, rep_fit, error, ncol = 2, nrow = 2)

annotate_figure(diagRT, top = text_grob("Diagnostic Plots", face = "bold", size = 10))

# Posterior summary: median +/- MAD, 90% HDI, p_direction, Bayes factor, Rhat
full_tab <- describe_posterior(bmod_full,
                               estimate = "median", dispersion = TRUE,
                               ci = .9, ci_method = "hdi",
                               bf_prior = bmod_full, diagnostic = "Rhat",
                               test = c("p_direction", "bf"))

# Drop the intercept and prior rows from the summary
full_tab <- filter(full_tab, Parameter %notin% c("b_Intercept", "prior_beta", "prior_sigma"))



# -------------------------------------- FREQUENTIST STATS: LRT -----------------------------------------------
#measurevar = "RT"
# formula = "condition*intervention*session"
# COVA = "+ condition*age + gender + BMI_V1  + thirsty  + hungry +"
# random = " (condition*session|id) + (1|trialxcondition)"
# 
# #method LRT 
# model = mixed(as.formula(paste(measurevar, paste (formula, COVA, random), sep="~")), data = PAV, method = "LRT", control = control, REML = FALSE); model
# 
# table = nice(model);  ref_grid(model)  #triple check everything is centered at 0
# 
# 
# mod =  lmer(as.formula(paste(measurevar, paste (formula, COVA, random), sep="~")), data = PAV, control = control)

# tab_model(mod, show.p = T,show.intercept = F, show.se = T, title ="", show.re.var = F, digits = 2, dv.labels = "Latency", emph.p = TRUE, file = "tmp/temp1.html")

# tables <- list.clean(readHTMLTable("tmp/temp1.html"), fun = is.null, recursive = FALSE)
# tables2 = tables[[1]] %>% janitor::row_to_names(row_number = 1)
# 
# tables2 <- as.matrix(tables2) %>% as_tibble()
# tables2[is.na(tables2)] <- ""
# tables3 = tables2[1:length(table$Effect),1:4] 
# tables3[5] = str_split(gsub("[^0-9.,-]", "", table[3]), ",")[[1]]; tables3[6] = as.numeric(str_split(gsub("[^0-9.,-]", "", table[4]), ",")[[1]]); 
# tables3$...6 = ifelse(tables3$...6 < 0.05,paste("<span style=\" font-weight: bold; \" >" ,sprintf("%.3f",tables3$...6), "</span>"),  paste("<span>" ,sprintf("%.3f",tables3$...6), "</span>"))
# colnames(tables3)[5] = "\u03C7\u00B2"; colnames(tables3)[6] = "p"
# tables3$p[1]= "<span style=\" font-weight: bold;    \" >\u003C 0.001</span>"
# tables3 %>%   
#   kbl(caption ="Latency (ms)",escape = F) %>%
#   kable_styling(latex_options = "HOLD_position", position = "center", full_width = F) %>%  row_spec(0,bold=T,align='c')
# tmp = tables2[(length(table$Effect)+1):(length(table$Effect)+5),1:2]
# names(tmp) <- NULL
# tmp1 <- data.frame(t(tmp[-1]))
# colnames(tmp1) <- tmp[[1]]
# tmp1 %>% kbl(digits = 2) %>%
#   kable_styling(latex_options = "HOLD_position", position = "center", full_width = F) 


# -------------------------------------- Regression table summary --------------------------------------

tab_model(bmod_full, show.p = F, show.intercept = F, show.se = T, title ="", show.re.var = F, digits = 3, dv.labels = "Latency (ms)", file = "tmp/temp2.html", transform = NULL,  rm.terms = "beta")
  Latency (ms)
Predictors Estimates std. Error CI (95%)
condition -4.794 2.492 -8.999 – 1.763
intervention 0.363 2.987 -5.283 – 6.169
session 0.451 2.636 -5.064 – 5.503
age 1.493 3.138 -3.994 – 7.651
gender 0.261 2.838 -5.437 – 5.878
BMI V 1 0.445 2.786 -4.918 – 5.811
hungry -1.932 2.998 -7.576 – 2.952
GLP diff -0.435 3.093 -6.172 – 5.309
reelin diff -0.576 2.939 -6.453 – 5.184
condition.intervention -0.277 1.869 -4.025 – 3.277
condition.session -1.751 1.867 -5.347 – 2.733
intervention.session -1.297 2.562 -6.086 – 3.831
condition.intervention.session 0.258 1.722 -3.339 – 3.402
ICC 1.00
N id 50
N trialxcondition 20
Observations 3223
Marginal R2 / Conditional R2 0.004 / 0.203
report = capture.output(sexit(bmod_full, ci=.9))
print(paste("Bayesian general linear mixed model (exgaussian family with a identity link) (estimated using MCMC sampling with " ,chains ,"chains of", niter, "iterations and a warmup of", warm, ") to predict Latency with condition, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: RT ~ condition * intervention * session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff). The model included condition, session, id and trialxcondition as random effects (formula: list(~condition * session | id, ~1 | trialxcondition)). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 130.20) distributions"))
## [1] "Bayesian general linear mixed model (exgaussian family with a identity link) (estimated using MCMC sampling with  4 chains of 500 iterations and a warmup of 100 ) to predict Latency with condition, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: RT ~ condition * intervention * session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff). The model included condition, session, id and trialxcondition as random effects (formula: list(~condition * session | id, ~1 | trialxcondition)). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 130.20) distributions"
full_tab
## Summary of Posterior Distribution
## 
## Parameter                      | Median |  MAD |         90% CI |     pd |  Rhat |    BF
## ----------------------------------------------------------------------------------------
## condition                      |  -4.79 | 2.49 | [-8.65, -0.19] | 94.12% | 1.227 |  4.69
## intervention                   |   0.36 | 2.99 | [-3.86,  5.62] | 55.25% | 1.020 | 0.975
## session                        |   0.45 | 2.64 | [-3.97,  4.83] | 57.56% | 1.009 | 0.886
## age                            |   1.49 | 3.14 | [-3.27,  6.50] | 68.75% | 1.039 |  1.01
## gender                         |   0.26 | 2.84 | [-4.47,  4.78] | 53.87% | 1.014 |  1.04
## BMI_V1                         |   0.44 | 2.79 | [-3.89,  4.89] | 55.75% | 1.010 | 0.824
## hungry                         |  -1.93 | 3.00 | [-6.58,  2.36] | 74.62% | 1.011 |  1.05
## GLP_diff                       |  -0.44 | 3.09 | [-5.72,  4.04] | 56.56% | 1.018 |  1.21
## reelin_diff                    |  -0.58 | 2.94 | [-5.44,  4.42] | 56.81% | 1.034 |  1.05
## condition:intervention         |  -0.28 | 1.87 | [-3.47,  2.63] | 57.19% | 1.006 | 0.612
## condition:session              |  -1.75 | 1.87 | [-4.90,  1.78] | 80.44% | 1.051 |  1.13
## intervention:session           |  -1.30 | 2.56 | [-5.44,  3.07] | 70.31% | 1.039 |  1.06
## condition:intervention:session |   0.26 | 1.72 | [-2.55,  3.03] | 55.44% | 1.005 | 0.644
report[4]
## [1] "- b_condition (Median = -4.79, 90% CI [-8.65, -0.19]) has a 94.12% probability of being negative (< 0), 93.94% of being significant (< -0.05), and 93.00% of being large (< -0.30)"
tables <- list.clean(readHTMLTable("tmp/temp2.html"), fun = is.null, recursive = FALSE)
tables2 = tables[[1]] %>% janitor::row_to_names(row_number = 1)
tables2 <- as.matrix(tables2) %>% as_tibble()

# check for bad ICC?
tables2[is.na(tables2)] <- "";  names(tables2) <- NULL; tmp = t(tables2[(length(full_tab$Parameter)+1):(length(full_tab$Parameter)+5),1:2]);
tmp[,5][1] = "R2"; tmp[,5][2] = gsub(".*/","",tmp[,5][2]); colnames(tmp) =  tmp[1,]
pander:: pander(tmp[2,])
## 
## --------------------------------------------------------
##  ICC    N id   N trialxcondition   Observations    R2   
## ------ ------ ------------------- -------------- -------
##  1.00    50           20               3223       0.204 
## --------------------------------------------------------


Plot Latency

# Posterior draws of the condition (CS+ vs CS-) fixed effect
dfdraws <- bmod_full %>%
    spread_draws(`b_condition`)

# 90% HDI plot of the condition effect
HDI_RT <- plotHDI(dfdraws$`b_condition`, credMass = .90, binSize = 100, Title = "") +
    theme_bw() + html_theme

# Posterior draws of the estimated marginal means per condition
dfdraws2 <- bmod_full %>%
     emmeans(~ condition) %>%
     gather_emmeans_draws()

# Posterior distribution of latency per condition (slab + point interval)
pp <- dfdraws2 %>%
    ggplot(aes(x = as.factor(condition), y = .value, fill = as.factor(condition))) +
    #geom_abline(slope= 0, intercept=0, linetype = "dashed", color = "black") +
    stat_slab(.width = c(0.50, 0.9), position = "dodge", alpha = 0.5) +
    stat_pointinterval(.width = c(0.50, 0.9), position = "dodge") +
    labs(x = "", y = "Latency (ms)", title = "") +
    scale_fill_manual(values = c("-1" = pal[1], "1" = pal[2]), guide = "none") +
    scale_color_manual(values = c("-1" = pal[1], "1" = pal[2]), guide = "none") +
    scale_x_discrete(labels = c("CS-", "CS+")) +
  #scale_y_continuous(expand = c(0, 0), breaks = c(seq.int(400,550, by = 50)), limits = c(380,575)) +
    theme_bw()

plt_bayes_html <- pp + html_theme
plt_bayes <- pp + averaged_theme

# Combine posterior (A) and HDI (B) panels
figureRT <- ggarrange(plt_bayes_html, HDI_RT,
                      labels = c("A", "B"),
                      ncol = 2)
#figureRT



# RT

# Within-subject summary (mean +/- SE per condition x session) for the crossbars.
dfR <- summarySEwithin(PAV.means,
                       measurevar = "RT",
                       withinvars = c("condition","session"), 
                       idvar = "id")

# Map the CS factor onto numeric x positions (condition "1" at -0.25, the other
# level at 0.25) and jitter individual points so paired lines stay visible.
dfR$cond <- ifelse(dfR$condition == "1", -0.25, 0.25)
PAV.means$cond <- ifelse(PAV.means$condition == "1", -0.25, 0.25)
PAV.means <- PAV.means %>% mutate(condjit = jitter(as.numeric(cond), 0.3),
                                  grouping = interaction(id, cond))

# Raincloud-style plot: per-subject lines, half violins, jittered points and
# mean +/- SE crossbars, faceted by session.
pp <- ggplot(PAV.means, aes(x = cond, y = RT, 
                            fill = as.factor(condition), color = as.factor(condition))) +
  geom_line(aes(x = condjit, group = id, y = RT), alpha = .3, size = 0.5, color = 'gray') +
  # NOTE(review): `color = NA` is mapped inside aes(); if a borderless violin is
  # intended it should be a constant set outside aes() -- confirm before changing.
  geom_flat_violin(scale = "count", trim = FALSE, alpha = .2, aes(fill = as.factor(condition), color = NA))+
  geom_point(aes(x = condjit, shape = as.factor(intervention)), alpha = .3) +  # removed trailing comma (empty argument)
  geom_crossbar(data = dfR, aes(y = RT, ymin=RT-se, ymax=RT+se), width = 0.2 , alpha = 0.1)+
  ylab('Latency (ms)')+
  xlab('Conditioned stimulus')+
  scale_y_continuous(expand = c(0, 0), breaks = c(seq.int(200,1000, by = 200)), limits = c(150,1050)) +
  scale_x_continuous(labels=c("CS+", "CS-"),breaks = c(-.25,.25), limits = c(-.5,.5)) +
  scale_fill_manual(values=c("1"= pal[2], "-1"=  pal[1]), guide = 'none') +
  scale_color_manual(values=c("1"= pal[2], "-1"=  pal[1]), guide = 'none') +
  scale_shape_manual(name="Intervention", labels=c("Placebo", "Liraglutide"), values = c(1, 2)) +
  theme_bw() + facet_wrap(~session, labeller=labeller(session = labels))

# Print-theme variant (saved to PDF below) and HTML-theme variant (rendered inline).
plt <- pp + averaged_theme  + theme(legend.position=c(.96,.94), axis.text.x = element_text(size = 14))
pp + html_theme  + theme(legend.position=c(.96,.94), axis.text.x = element_text(size = 14))
A) Posterior distribution by Pavlovian cue. B) Highest density interval (90% HDI) of the posterior distribution difference for the latency to respond between CS+ and CS-

  A) Posterior distribution by Pavlovian cue. B) Highest density interval (90% HDI) of the posterior distribution difference for the latency to respond between CS+ and CS-

# Save the averaged-theme raincloud figure to PDF.
cairo_pdf('figures/Figure_PavlovianRT_Lira.pdf')
print(plt)
dev.off()  # was `dev.off` (no parentheses): it printed the function body instead of closing the device, leaving the PDF device open

# Save the Bayesian posterior/HDI composite figure to PDF.
cairo_pdf('figures/Figure_PavlovianRT_Lira_Bayes.pdf')
print(figureRT)
dev.off()
## png 
##   2

Perceived liking (Pavlovian Cue)

Ratings = how pleasant is the clue (0-100, no repetitions) & condition = CS+ or CS-

# Model: liking as a function of CS condition x intervention x session, plus
# person-level covariates, with by-id random slopes for condition x session.
mf <- formula(liking ~ condition*intervention*session + age + gender + BMI_V1  + hungry + GLP_diff + reelin_diff + (condition*session|id))

# -------------------------------------- Bayesian Regression Models using Stan -------------------------------------
# 1) Generic weakly informative normal(0, 3) prior on fixed effects and a very
#    weak normal(0, 100) prior on the intercept.
# 2) sample_prior and save_pars are required so Bayes factors can be computed later.
# NOTE(review): with niter = 500 / warm = 100 (see setup), BFs may be unstable;
# the setup comment itself recommends ~20'000 total iterations for stable BFs.
bmod_full <- brm(mf, data = PAV.means, family = gaussian,
                 prior = c(prior(normal(0, 3), class = "b", coef = ""),
                           prior(normal(0, 100), class = "Intercept", coef = "")),
                 sample_prior = TRUE,  # was `T`; TRUE/FALSE cannot be reassigned, T/F can
                 save_pars = save_pars(all = TRUE),
                 chains = chains, iter = niter, warmup = warm, seed = 123,
                 inits = 1,  # NOTE(review): newer brms renames `inits` to `init`; kept for the pinned version
                 backend = 'cmdstanr', threads = threading(4))
## Running MCMC with 4 parallel chains, with 4 thread(s) per chain...
## 
## Chain 1 WARNING: There aren't enough warmup iterations to fit the 
## Chain 1          three stages of adaptation as currently configured. 
## Chain 1          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 1          the given number of warmup iterations: 
## Chain 1            init_buffer = 15 
## Chain 1            adapt_window = 75 
## Chain 1            term_buffer = 10 
## Chain 1 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 2 WARNING: There aren't enough warmup iterations to fit the 
## Chain 2          three stages of adaptation as currently configured. 
## Chain 2          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 2          the given number of warmup iterations: 
## Chain 2            init_buffer = 15 
## Chain 2            adapt_window = 75 
## Chain 2            term_buffer = 10 
## Chain 2 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 3 WARNING: There aren't enough warmup iterations to fit the 
## Chain 3          three stages of adaptation as currently configured. 
## Chain 3          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 3          the given number of warmup iterations: 
## Chain 3            init_buffer = 15 
## Chain 3            adapt_window = 75 
## Chain 3            term_buffer = 10 
## Chain 3 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 4 WARNING: There aren't enough warmup iterations to fit the 
## Chain 4          three stages of adaptation as currently configured. 
## Chain 4          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 4          the given number of warmup iterations: 
## Chain 4            init_buffer = 15 
## Chain 4            adapt_window = 75 
## Chain 4            term_buffer = 10 
## Chain 4 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 1 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 1 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 2 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 2 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 3 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 3 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 4 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 4 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 1 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 3 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 4 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 2 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 1 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 3 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 4 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 1 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 2 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 3 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 4 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 1 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 2 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 1 finished in 3.2 seconds.
## Chain 3 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 4 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 3 finished in 3.3 seconds.
## Chain 4 finished in 3.3 seconds.
## Chain 2 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 2 finished in 3.9 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 3.4 seconds.
## Total execution time: 4.0 seconds.
#lmer to compare
# Frequentist counterpart: the full (condition*session | id) random structure
# does not converge, so the random slopes are reduced to condition + session.
fmod_full <- lmer(update(mf, ~ .-(condition*session|id)+(condition+session|id)) , data=PAV.means) #cannot maximize random structure here
## plot population-level effects posterior distributions and chain sampling

# Posterior densities and traceplots of the fixed effects; the `pars` regexes
# pick out the b_* coefficients by name fragments.
param = mcmc_plot(object = bmod_full, pars =c("b_.*o", "b_.*y", "b_.*ge", "b_.*diff", "b_.*V1"), type ="areas")

trace = mcmc_plot(object = bmod_full, pars =c("b_.*o", "b_.*y", "b_.*ge", "b_.*diff", "b_.*V1"), type ="trace")


#check assumptions
# Posterior predictive checks (NOTE(review): `nsamples` is renamed `ndraws` in
# newer brms -- kept for the pinned version).
var_group = pp_check(bmod_full, type = "stat_grouped", group = "intervention", binwidth = 1, nsamples = NULL) #equality of variance between groups

rep_fit = pp_check(bmod_full, nsamples = 10) # check response fit 

error = pp_check(bmod_full, type ="error_scatter_avg", nsamples = NULL) # check good alignment between model and data, and no obvious pattern to the types of errors we are getting.


#Normality of errors
residuals <-residuals(bmod_full)[, 1]; res <- qqnorm(residuals, pch = 1, plot.it=F)

lmx = plot_model(fmod_full, type = "diag"); #diagnostic plots for lmer

#plot all
diagLik <- ggarrange(param, var_group, rep_fit, error, ncol = 2, nrow = 2)

annotate_figure(diagLik, top = text_grob("Diagnostic Plots", face = "bold", size = 10))

# Posterior summary: median + MAD, 90% HDI, probability of direction, Rhat, BF.
full_tab = describe_posterior(bmod_full,
                   estimate = "median", dispersion = T,
                   ci = .9, ci_method = "hdi",
                   bf_prior = bmod_full, diagnostic = "Rhat",  
                   test = c("p_direction", "bf"))

# Drop intercept and prior draws from the report (`%notin%` is a project helper).
full_tab = filter(full_tab, Parameter %notin% c("b_Intercept", "prior_beta", "prior_sigma"))

# -------------------------------------- FREQUENTIST STATS: -----------------------------------------------

# measurevar = "liking"
# formula = "condition*intervention*session"
# COVA = "+ age + gender + BMI_V1 + "
# random = "Error(id/session*condition)"
#  
# mdl.lik = aov_car(as.formula(paste(measurevar, paste (formula, COVA, random), sep="~")), data= PAV.means, factorize = F, anova_table = list(correction = "GG", es = "pes"))
# res = nice(mdl.lik, MSE=F);  res = res[c(10,1:6,11,15,16),] #clean up unesuful an reorder
# ref_grid(mdl.lik)  #triple check everything is centered at 0
# 
# #calculate Partial eta-squared and its 90 % CI for each effect
# PES.lik = pes_ci(as.formula(paste(measurevar, paste (formula, COVA, random), sep="~")), data = PAV.means, conf.level = .90, factorize = FALSE, anova.type = "II", epsilon="none") ;  PES.lik = PES.lik[c(10,1:6,11,15,16),]
# mdl.lik.emms = emmeans(mdl.lik, pairwise ~ condition)
# 
# res$p.value = as.numeric(res$p.value)
# res$p.value = ifelse(res$p.value < 0.05,paste("<span style=\" font-weight: bold; \" >" ,sprintf("%.3f",res$p.value), "</span>"),  paste("<span>" ,sprintf("%.3f",res$p.value), "</span>"))
# 
# res$F = unlist(str_split(gsub("[^0-9.,-]", "", res$F), ","));res$pes = unlist(str_split(gsub("[^0-9.,-]", "", res$pes), ","));
# res$`90% CI` = paste(sprintf("%.3f",PES.lik[,2]), "-", sprintf("%.3f",PES.lik[,3]))
# 
# res$p.value[1]= "<span style=\" font-weight: bold;    \" >\u003C 0.001</span>"
# res$pes[c(5,7,8)]= c("\u003C 0.001")
# colnames(res)[3:5] = c( paste("F(", res$df[1], ")", sep=""),"&eta;<sub>p</sub><sup>2</sup>", "p")
# res[c(1,4,6,3,5)]  %>% kbl(digits = 2, escape = F,row.names = F)  %>%
#   kable_styling(latex_options = "striped", position = "center", full_width = F) 

# -------------------------------------- Regression table summary --------------------------------------

# Export the lmer coefficient table to HTML; the file is re-read below to
# extract the footer rows (ICC / N / Observations / R2).
tab_model(fmod_full, show.p = F, show.intercept = F, show.se = T, title ="", show.re.var = F, digits = 3, dv.labels = "Perceived liking", file = "tmp/temp3.html", rm.terms = "beta")
  Perceived liking
Predictors Estimates std. Error CI
condition 9.600 1.826 6.020 – 13.180
intervention -2.503 1.493 -5.428 – 0.422
session -2.118 1.094 -4.262 – 0.026
age 1.989 1.335 -0.627 – 4.605
gender 0.102 1.382 -2.607 – 2.811
BMI_V1 0.491 1.492 -2.433 – 3.416
hungry 5.545 1.228 3.137 – 7.953
GLP_diff 0.776 1.935 -3.017 – 4.568
reelin_diff 0.133 2.036 -3.858 – 4.124
condition * intervention 0.720 1.826 -2.859 – 4.300
condition * session -0.272 0.920 -2.075 – 1.530
intervention * session 0.316 1.078 -1.798 – 2.429
condition * intervention * session 1.224 0.920 -0.579 – 3.027
ICC 0.55
N id 50
Observations 184
Marginal R2 / Conditional R2 0.294 / 0.681
# Capture the plain-language effect descriptions (sexit) at the 90% CI level.
report = capture.output(sexit(bmod_full, ci=.9))
# NOTE(review): only chains/niter/warm are interpolated below; the formula text
# and prior scales are hardcoded -- update the string if the model changes.
print(paste("Bayesian linear mixed model (estimated using MCMC sampling with" ,chains ," chains of", niter, " iterations and a warmup of", warm, ")  to predict liking with condition, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: liking ~ condition * intervention * session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff). The model included condition, session and id as random effects (formula: ~condition * session | id). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 15.10) distributions"))
## [1] "Bayesian linear mixed model (estimated using MCMC sampling with 4  chains of 500  iterations and a warmup of 100 )  to predict liking with condition, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: liking ~ condition * intervention * session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff). The model included condition, session and id as random effects (formula: ~condition * session | id). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 15.10) distributions"
full_tab
## Summary of Posterior Distribution
## 
## Parameter                      | Median |  MAD |         90% CI |     pd |  Rhat |     BF
## -----------------------------------------------------------------------------------------
## condition                      |   6.50 | 1.81 | [ 3.85,  9.23] |   100% | 1.033 | 804.20
## intervention                   |  -1.75 | 1.43 | [-3.77,  0.48] | 86.25% | 1.059 |   1.73
## session                        |  -1.96 | 0.94 | [-3.66, -0.37] | 97.31% | 0.999 |   2.09
## age                            |   1.64 | 1.27 | [-0.40,  3.46] | 91.06% | 1.057 |   1.08
## gender                         |   0.05 | 1.11 | [-2.10,  1.86] | 51.31% | 1.002 |  0.371
## BMI_V1                         |   0.23 | 1.45 | [-1.75,  2.51] | 56.19% | 1.026 |  0.504
## hungry                         |   4.56 | 1.28 | [ 2.85,  6.52] |   100% | 1.021 | 876.77
## GLP_diff                       |   0.18 | 1.69 | [-2.68,  2.52] | 54.25% | 1.090 |  0.561
## reelin_diff                    |  -0.54 | 1.52 | [-2.81,  2.44] | 62.88% | 1.004 |  0.630
## condition:intervention         |   0.53 | 1.49 | [-2.24,  2.87] | 62.06% | 1.002 |  0.610
## condition:session              |  -0.47 | 1.10 | [-1.98,  1.23] | 67.50% | 1.022 |  0.428
## intervention:session           |   0.38 | 0.96 | [-1.31,  2.00] | 65.94% | 1.003 |  0.366
## condition:intervention:session |   1.12 | 0.91 | [-0.46,  2.57] | 87.75% | 0.999 |  0.672
report[c(4,10)]
## [1] "- b_condition (Median = 6.50, 90% CI [3.85, 9.23]) has a 100.00% probability of being positive (> 0), 100.00% of being significant (> 1.05), and 54.69% of being large (> 6.29)"
## [2] "- b_hungry (Median = 4.56, 90% CI [2.85, 6.52]) has a 100.00% probability of being positive (> 0), 100.00% of being significant (> 1.05), and 7.62% of being large (> 6.29)"
# Re-read the liking regression table and extract its footer rows
# (ICC, N id, Observations, R2) for a compact pander summary.
tables <- list.clean(readHTMLTable("tmp/temp3.html"), fun = is.null, recursive = FALSE)
tables2 <- janitor::row_to_names(tables[[1]], row_number = 1)
tables2 <- as_tibble(as.matrix(tables2))

# The footer starts right after the length(full_tab$Parameter) coefficient
# rows; take the next 4 summary rows and transpose (labels row / values row).
tables2[is.na(tables2)] <- ""
names(tables2) <- NULL
n_params <- length(full_tab$Parameter)
tmp <- t(tables2[(n_params + 1):(n_params + 4), 1:2])
tmp[, 4][1] <- "R2"                            # shorten "Marginal R2 / Conditional R2"
tmp[, 4][2] <- gsub(".*/", "", tmp[, 4][2])    # keep only the value after the slash
pander::pander(tmp)
## 
## ------ ------ -------------- -------
##  ICC    N id   Observations    R2   
## 
##  0.55    50        184        0.681 
## ------ ------ -------------- -------



Plot Perceived liking (Pavlovian Cue)

# Extract posterior draws of the condition and hunger fixed effects from the
# liking model.
dfdraws = bmod_full %>%
    spread_draws(`b_condition`,`b_hungry` )

# 90% HDIs of the CS effect and of the hunger covariate effect.
HDI_cond_PAV = plotHDI( dfdraws$`b_condition` , credMass = .90, binSize = 100, Title = "") + theme_bw()

HDI_hunger_PAV = plotHDI( dfdraws$`b_hungry` , credMass = .90, binSize = 100, Title = "") + theme_bw() # also mcmc_plot(object = bmod_full, pars =c("b_hungry"), type ="areas", prob = 0.9)

# Posterior draws of the estimated marginal means per condition.
dfdraws2 =  bmod_full %>%
     emmeans(~ condition) %>%
     gather_emmeans_draws() 

# Slab + point-interval plot of the condition-wise posterior (50% / 90% intervals).
pp = dfdraws2 %>%
    ggplot(aes(x = as.factor(condition) , y = .value,  fill = as.factor(condition))) +
    geom_abline(slope= 0, intercept=0, linetype = "dashed", color = "black") +
    #geom_point(position = "jitter") +
    stat_slab(.width = c(0.50, 0.9), position="dodge", alpha=0.5) +
    stat_pointinterval(.width = c(0.50, 0.9),position="dodge") +
    ylab('Perceived liking')+
    xlab('')+
    scale_fill_manual(values=c("-1" = pal[1],"1"=pal[2]), guide="none") +
    scale_color_manual( values=c("-1" = pal[1],"1"=pal[2]), guide="none") +
    scale_x_discrete(labels=c("CS-", "CS+")) +
     scale_y_continuous(expand = c(0, 0), breaks = c(seq.int(0,100, by = 25)), limits = c(-0.05,100.5)) +
    theme_bw()# + facet_wrap(~group, labeller=labeller(group =labels))

# Themed variants and the combined A/B figure.
plt_bayes_html = pp + html_theme 
plt_bayes = pp + averaged_theme 

figurePAV <- ggarrange(plt_bayes_html, HDI_cond_PAV,
                    labels = c("A", "B"),
                    ncol = 2)
#figurePAV


# Liking

# Within-subject summary (mean +/- SE per condition x session) for the crossbars.
dfL <- summarySEwithin(PAV.means,
                       measurevar = "liking",
                       withinvars = c("condition", "session"), 
                       idvar = "id")

# Numeric x positions for the CS factor (condition "1" at -0.25, other at 0.25).
dfL$cond <- ifelse(dfL$condition == "1", -0.25, 0.25)


# Raincloud plot; `cond` and `condjit` on PAV.means were computed in the RT
# section above and are reused here.
pp <- ggplot(PAV.means, aes(x = cond, y = liking, 
                            fill = as.factor(condition), color = as.factor(condition))) +
  geom_hline(yintercept=50, linetype="dashed", size=0.4, alpha=0.8) +
  geom_line(aes(x = condjit, group = id, y = liking), alpha = .3, size = 0.5, color = 'gray' ) +
  # NOTE(review): `color = NA` is mapped inside aes(); if a borderless violin is
  # intended it should be a constant set outside aes() -- confirm before changing.
  geom_flat_violin(scale = "count", trim = FALSE, alpha = .2, aes(fill =  as.factor(condition), color = NA)) +
  geom_point(aes(x = condjit, shape =  as.factor(intervention)), alpha = .3) +
  geom_crossbar(data = dfL, aes(y = liking, ymin=liking-se, ymax=liking+se), width = 0.2 , alpha = 0.1)+
  ylab('Liking Ratings')+
  xlab('Conditioned stimulus')+
  scale_y_continuous(expand = c(0, 0), breaks = c(seq.int(0,100, by = 25)), limits = c(-0.05,100.5)) +
  scale_x_continuous(labels=c("CS+", "CS-"),breaks = c(-.25,.25), limits = c(-.5,.5)) +
  scale_fill_manual(values=c("1"= pal[2], "-1"=  pal[1]), guide = 'none') +
  scale_color_manual(values=c("1"= pal[2], "-1"=  pal[1]), guide = 'none') +
  scale_shape_manual(name="Intervention", labels=c("Placebo", "Liraglutide"), values = c(1, 2)) +
  theme_bw()+ facet_wrap(~session, labeller=labeller(session = labels))


# Print-theme variant (saved to PDF below) and HTML-theme variant (rendered inline).
plt = pp + averaged_theme  + theme(legend.position=c(.96,.94), axis.text.x = element_text(size = 14))
pp + html_theme  + theme(legend.position=c(.96,.94), axis.text.x = element_text(size = 14))
A) Posterior distribution by Pavlovian cue. B) Highest density interval (90% HDI) of the posterior distribution difference for the perceived liking between CS+ and CS-

  A) Posterior distribution by Pavlovian cue. B) Highest density interval (90% HDI) of the posterior distribution difference for the perceived liking between CS+ and CS-



# Save the averaged-theme raincloud figure and the Bayesian composite to PDF.
cairo_pdf('figures/Figure_PavlovianLiking_Lira.pdf')
print(plt)
dev.off()
## png 
##   2
cairo_pdf('figures/Figure_PavlovianLiking_Lira_Bayes.pdf')
print(figurePAV)
dev.off()
## png 
##   2

Instrumental Conditioning Task

grips = number of times participant exceeded the force threshold to acquire the reward (Milkshake)

# Model
# Two competing learning-curve models for the number of grips:
#   mf1 -- linear effect of trial
#   mf2 -- piecewise linear effect of trial with a knot (spline) at trial 5
mf1 <- formula(grips ~ trial*intervention*session + age + gender + BMI_V1  + hungry + GLP_diff + reelin_diff + (session|id) + (1|trial)) # LINEAR FIT  
mf2 <- formula(grips ~  lspline(trial,5)*intervention*session + age + gender + BMI_V1  + hungry + GLP_diff + reelin_diff + (session|id) + (1|trial)) # PIECEWISE REGRESSION WITH SPLINE AT 5

# -------------------------------------- Bayesian Regression Models using Stan -------------------------------------
# Same prior setup as the liking model: weakly informative normal(0, 3) on the
# fixed effects, very weak normal(0, 100) on the intercept; priors are sampled
# and all parameters saved so Bayes factors can be computed later.
linmod <- brm(mf1, data = INST, family = gaussian,
              prior = c(prior(normal(0, 3), class = "b", coef = ""),
                        prior(normal(0, 100), class = "Intercept", coef = "")),
              sample_prior = TRUE,  # was `T`; TRUE/FALSE cannot be reassigned, T/F can
              save_pars = save_pars(all = TRUE),
              chains = chains, iter = niter, warmup = warm, seed = 123,
              inits = 1,  # NOTE(review): newer brms renames `inits` to `init`; kept for the pinned version
              backend = 'cmdstanr', threads = threading(4))
## Running MCMC with 4 parallel chains, with 4 thread(s) per chain...
## 
## Chain 1 WARNING: There aren't enough warmup iterations to fit the 
## Chain 1          three stages of adaptation as currently configured. 
## Chain 1          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 1          the given number of warmup iterations: 
## Chain 1            init_buffer = 15 
## Chain 1            adapt_window = 75 
## Chain 1            term_buffer = 10 
## Chain 1 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 2 WARNING: There aren't enough warmup iterations to fit the 
## Chain 2          three stages of adaptation as currently configured. 
## Chain 2          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 2          the given number of warmup iterations: 
## Chain 2            init_buffer = 15 
## Chain 2            adapt_window = 75 
## Chain 2            term_buffer = 10 
## Chain 2 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 3 WARNING: There aren't enough warmup iterations to fit the 
## Chain 3          three stages of adaptation as currently configured. 
## Chain 3          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 3          the given number of warmup iterations: 
## Chain 3            init_buffer = 15 
## Chain 3            adapt_window = 75 
## Chain 3            term_buffer = 10 
## Chain 3 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 4 WARNING: There aren't enough warmup iterations to fit the 
## Chain 4          three stages of adaptation as currently configured. 
## Chain 4          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 4          the given number of warmup iterations: 
## Chain 4            init_buffer = 15 
## Chain 4            adapt_window = 75 
## Chain 4            term_buffer = 10 
## Chain 4 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 1 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 1 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 3 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 3 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 2 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 2 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 4 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 4 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 3 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 1 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 2 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 4 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 1 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 3 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 2 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 4 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 3 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 1 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 2 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 4 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 1 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 1 finished in 73.8 seconds.
## Chain 3 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 3 finished in 74.2 seconds.
## Chain 2 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 2 finished in 78.2 seconds.
## Chain 4 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 4 finished in 78.4 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 76.1 seconds.
## Total execution time: 78.8 seconds.
splinemod = update(linmod, formula. = mf2)
## Running MCMC with 4 parallel chains, with 4 thread(s) per chain...
## 
## Chain 1 WARNING: There aren't enough warmup iterations to fit the 
## Chain 1          three stages of adaptation as currently configured. 
## Chain 1          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 1          the given number of warmup iterations: 
## Chain 1            init_buffer = 15 
## Chain 1            adapt_window = 75 
## Chain 1            term_buffer = 10 
## Chain 1 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 2 WARNING: There aren't enough warmup iterations to fit the 
## Chain 2          three stages of adaptation as currently configured. 
## Chain 2          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 2          the given number of warmup iterations: 
## Chain 2            init_buffer = 15 
## Chain 2            adapt_window = 75 
## Chain 2            term_buffer = 10 
## Chain 2 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 3 WARNING: There aren't enough warmup iterations to fit the 
## Chain 3          three stages of adaptation as currently configured. 
## Chain 3          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 3          the given number of warmup iterations: 
## Chain 3            init_buffer = 15 
## Chain 3            adapt_window = 75 
## Chain 3            term_buffer = 10 
## Chain 3 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 4 WARNING: There aren't enough warmup iterations to fit the 
## Chain 4          three stages of adaptation as currently configured. 
## Chain 4          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 4          the given number of warmup iterations: 
## Chain 4            init_buffer = 15 
## Chain 4            adapt_window = 75 
## Chain 4            term_buffer = 10 
## Chain 4 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 2 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 2 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 4 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 4 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 3 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 3 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 1 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 1 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 2 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 4 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 3 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 1 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 2 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 3 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 4 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 1 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 2 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 4 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 3 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 1 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 2 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 2 finished in 46.9 seconds.
## Chain 4 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 4 finished in 47.6 seconds.
## Chain 3 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 3 finished in 48.1 seconds.
## Chain 1 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 1 finished in 49.3 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 48.0 seconds.
## Total execution time: 49.4 seconds.
# Approximate leave-one-out cross-validation for each model, then compare ELPDs.
linmod$loo = loo(linmod); splinemod$loo = loo(splinemod)
comp = loo::loo_compare(linmod$loo, splinemod$loo) 

# splinemod wins (elpd_diff = 0 in the comparison table below).
bmod_full = splinemod #winner model

#lmer to compare
# NOTE(review): `control` (lmerControl settings) is defined elsewhere in the project.
fmod_full = lmer(mf2 , data = INST, REML=F, control = control )

Model Comparison between linear and piecewise with spline regression
Best fit => Piecewise Regression with spline: larger ELPD (expected log pointwise predictive density estimated via leave-one-out cross-validation) is better

# Turn the loo_compare matrix into a tibble with an explicit model column and
# render the key columns (ELPD difference and LOOIC).
comp <- as_tibble(comp, rownames = NA) %>% tibble::rownames_to_column()
colnames(comp)[1] <- "model"
# splinemod is preferred: linmod has the lower ELPD, so the spline improves
# out-of-sample prediction. (compare_ic(linmod, splinemod) would also show this
# but is deprecated.)
pander(comp[c(1:3, 8:9)])
## 
## ----------------------------------------------------
##    model     elpd_diff   se_diff   looic   se_looic 
## ----------- ----------- --------- ------- ----------
##  splinemod       0          0      11921    93.56   
## 
##   linmod      -0.1822     3.012    11922    93.51   
## ----------------------------------------------------
## plot population-level effects posterior distributions and chain sampling

param = mcmc_plot(object = bmod_full, pars =c("b_.*al"), type ="areas")
param2 = mcmc_plot(object = bmod_full, pars =c("b_.*y", "b_.*ge", "b_.*diff", "b_.*V1"), type ="areas")


trace = mcmc_plot(object = bmod_full, pars =c("b_.*al"), type ="trace")
trace2 = mcmc_plot(object = bmod_full, pars =c("b_.*y", "b_.*ge", "b_.*diff", "b_.*V1"), type ="trace")

#check assumptions
var_group = pp_check(bmod_full, type = "stat_grouped", group = "intervention", binwidth = 0.01, nsamples = NULL) #equality of variance between groups

rep_fit = pp_check(bmod_full, nsamples = 10) # check response fit 

error = pp_check(bmod_full, type ="error_scatter_avg", nsamples = NULL) # check good alignment between model and data, and no obvious pattern to the types of errors we are getting.


#Normality of errors
residuals <-residuals(bmod_full)[, 1]; res <- qqnorm(residuals, pch = 1, plot.it=F)

lmx = plot_model(fmod_full, type = "diag"); #diagnostic plots for lmer

# Assemble the four diagnostic panels into a 2x2 grid and add a bold title
diagINST <- ggarrange(param, var_group, rep_fit, error, nrow = 2, ncol = 2)

diag_title <- text_grob("Diagnostic Plots", face = "bold", size = 10)
annotate_figure(diagINST, top = diag_title)

# Posterior summary for the winning spline model: median + MAD, 90% HDI,
# probability of direction, Savage-Dickey Bayes factor and Rhat diagnostic.
full_tab = describe_posterior(bmod_full,
                   estimate = "median", dispersion = T,
                   ci = .9, ci_method = "hdi",
                   bf_prior = bmod_full, diagnostic = "Rhat",  
                   test = c("p_direction", "bf"))

# Drop the intercept and the sampled prior draws from the report
full_tab = filter(full_tab, Parameter %notin% c("b_Intercept", "prior_beta", "prior_sigma"))

# Relabel the lspline terms with readable names ("trial<5" / "trial>5").
# FIX: the last label previously read "trial>5:group:session", inconsistent
# with the "intervention" naming used for every other term in this table.
x = attributes(full_tab); x$clean_parameters[c(2:3,12:15,17:18),5] = c("trial<5",       "trial>5", "trial<5:intervention", "trial>5:intervention","trial<5:session", "trial>5:session", "trial<5:intervention:session", "trial>5:intervention:session"); attributes(full_tab) = x


# -------------------------------------- FREQUENTIST STATS: LRT + Bootstrap  -----------------------------------------------

# fmod_spline =  update(fmod_full, ~ .-lspline(trial,5)) #evaluate trial spline
# 
# # p-value from bootstrap distribution
# LRT_spline = PBmodcomp(fmod_full, fmod_spline, nsim = 500, seed = 123, cl=cores)


# -------------------------------------- Regression table summary --------------------------------------

# HTML regression table for the grips model: p-values, intercept and random-
# effect variances hidden, untransformed estimates, prior draws ("beta") removed.
tab_model(bmod_full,
          show.p = FALSE, show.intercept = FALSE, show.se = TRUE,
          title = "", show.re.var = FALSE, digits = 3,
          dv.labels = "Number of Grips", file = "tmp/temp4.html",
          transform = NULL, rm.terms = "beta")
  Number of Grips
Predictors Estimates std. Error CI (95%)
lsplinetrial51 0.062 0.087 -0.113 – 0.233
lsplinetrial52 -0.118 0.014 -0.147 – -0.089
intervention -1.756 1.007 -3.634 – 0.297
session 1.294 0.561 0.182 – 2.347
age -1.018 0.830 -2.834 – 0.661
gender -0.325 0.886 -1.974 – 1.440
BMI V 1 -1.330 0.975 -3.233 – 0.586
hungry -0.188 0.658 -1.566 – 1.198
GLP diff 1.162 1.184 -1.080 – 3.427
reelin diff 0.106 1.287 -2.437 – 2.700
lsplinetrial51.intervention 0.096 0.087 -0.067 – 0.256
lsplinetrial52.intervention 0.025 0.013 -0.001 – 0.053
lsplinetrial51.session -0.193 0.082 -0.341 – -0.021
lsplinetrial52.session 0.020 0.013 -0.005 – 0.044
intervention.session 0.208 0.530 -0.814 – 1.207
lsplinetrial51.intervention.session -0.116 0.086 -0.272 – 0.046
lsplinetrial52.intervention.session -0.024 0.013 -0.050 – 0.004
ICC 0.79
N id 50
N trial 24
Observations 2232
Marginal R2 / Conditional R2 0.158 / 0.789
# Capture the sexit() effect-size sentences as text for selective printing below
report = capture.output(sexit(bmod_full, ci=.9))
# Methods-style sentence describing the model; chains/niter/warm come from the
# settings defined at the top of the document
print(paste("Bayesian linear mixed model (estimated using MCMC sampling with" ,chains ," chains of", niter, " iterations and a warmup of", warm, ") to predict grips with trial, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: grips ~ lspline(trial, 5) + intervention + session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff + lspline(trial, 5):intervention + lspline(trial, 5):session + intervention:session + lspline(trial, 5):intervention:session). The model included session, id and trial as random effects (formula: list(~session | id, ~1 | trial)). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 8.90) distributions"))
## [1] "Bayesian linear mixed model (estimated using MCMC sampling with 4  chains of 500  iterations and a warmup of 100 ) to predict grips with trial, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: grips ~ lspline(trial, 5) + intervention + session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff + lspline(trial, 5):intervention + lspline(trial, 5):session + intervention:session + lspline(trial, 5):intervention:session). The model included session, id and trial as random effects (formula: list(~session | id, ~1 | trial)). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 8.90) distributions"
full_tab
## Summary of Posterior Distribution
## 
## Parameter                    | Median |  MAD |         90% CI |     pd |  Rhat |     BF
## ---------------------------------------------------------------------------------------
## trial<5                      |   0.06 | 0.09 | [-0.07,  0.21] | 75.88% | 1.000 |  0.040
## trial>5                      |  -0.12 | 0.01 | [-0.14, -0.09] |   100% | 0.999 | > 1000
## intervention                 |  -1.76 | 1.01 | [-3.40, -0.11] | 95.50% | 1.010 |   1.70
## session                      |   1.29 | 0.56 | [ 0.47,  2.21] | 98.94% | 1.000 |   2.82
## age                          |  -1.02 | 0.83 | [-2.44,  0.36] | 89.31% | 1.001 |  0.721
## gender                       |  -0.32 | 0.89 | [-1.78,  1.14] | 64.69% | 1.004 |  0.315
## BMI_V1                       |  -1.33 | 0.98 | [-2.94,  0.26] | 92.06% | 0.999 |  0.856
## hungry                       |  -0.19 | 0.66 | [-1.47,  0.81] | 61.75% | 1.000 |  0.238
## GLP_diff                     |   1.16 | 1.18 | [-0.76,  3.02] | 82.75% | 1.004 |  0.552
## reelin_diff                  |   0.11 | 1.29 | [-1.89,  2.39] | 53.06% | 1.005 |  0.408
## trial<5:intervention         |   0.10 | 0.09 | [-0.05,  0.22] | 86.62% | 1.003 |  0.050
## trial>5:intervention         |   0.03 | 0.01 | [ 0.00,  0.05] | 97.19% | 1.002 |  0.030
## trial<5:session              |  -0.19 | 0.08 | [-0.31, -0.05] | 98.62% | 1.001 |  0.382
## trial>5:session              |   0.02 | 0.01 | [ 0.00,  0.04] | 94.38% | 1.000 |  0.016
## intervention:session         |   0.21 | 0.53 | [-0.57,  1.05] | 66.06% | 0.998 |  0.185
## trial<5:intervention:session |  -0.12 | 0.09 | [-0.25,  0.01] | 92.00% | 0.999 |  0.064
## trial>5:group:session        |  -0.02 | 0.01 | [-0.05,  0.00] | 95.19% | 0.998 |  0.019
report[4:7]
## [1] "- b_lsplinetrial51 (Median = 0.06, 90% CI [-0.07, 0.21]) has a 75.88% probability of being positive (> 0), 0.00% of being significant (> 0.37), and 0.00% of being large (> 2.22)"     
## [2] "- b_lsplinetrial52 (Median = -0.12, 90% CI [-0.14, -0.09]) has a 100.00% probability of being negative (< 0), 0.00% of being significant (< -0.37), and 0.00% of being large (< -2.22)"
## [3] "- b_intervention (Median = -1.76, 90% CI [-3.40, -0.11]) has a 95.50% probability of being negative (< 0), 91.06% of being significant (< -0.37), and 30.44% of being large (< -2.22)" 
## [4] "- b_session (Median = 1.29, 90% CI [0.47, 2.21]) has a 98.94% probability of being positive (> 0), 96.12% of being significant (> 0.37), and 4.06% of being large (> 2.22)"
# Re-read the HTML table written by tab_model() to extract the model-summary
# rows (ICC, N id, N trial, Observations, R2) that sit below the coefficients
tables <- list.clean(readHTMLTable("tmp/temp4.html"), fun = is.null, recursive = FALSE)
tables2 = tables[[1]] %>% janitor::row_to_names(row_number = 1)
tables2 <- as_matrix(tables2) %>% as_tibble()


# Take the 5 rows following the coefficient rows and transpose them; the row
# count is keyed to length(full_tab$Parameter), so this breaks if the filter
# above changes -- NOTE(review): fragile index arithmetic
tables2[is.na(tables2)] <- "";  names(tables2) <- NULL; tmp = t( tables2[(length(full_tab$Parameter)+1):(length(full_tab$Parameter)+5),1:2]);
# Keep only the conditional R2 (text after the "/") and rename the header
tmp[,5][1] = "R2"; tmp[,5][2] = gsub(".*/","",tmp[,5][2])
pander::pander(tmp)
## 
## ------ ------ --------- -------------- -------
##  ICC    N id   N trial   Observations    R2   
## 
##  0.79    50      24          2232       0.789 
## ------ ------ --------- -------------- -------
# tmp %>% kbl() %>%
#   kable_styling(latex_options = "HOLD_position", position = "center", full_width = F) %>%  row_spec(1,bold=T)



Plot

# Conditions at which to evaluate the session effect: pre = -1, post = 1
int_conditions <- list(session = c(pre = -1, post = 1))
# Model-implied trial trajectories by session (brms makes the spline easy to extract)
plt = conditional_effects(bmod_full, effects = "trial:session",
                    int_conditions = int_conditions) #easier to extract spline with brms

# Predictions for plotting; estimate__ is copied into a `grips` column
dfTRIAL = as_tibble(plt$trial); dfTRIAL$grips = dfTRIAL$estimate__

#by groups
# Within-subject means/SEs of the raw data, split by intervention group
dfTRIALg <- summarySEwithin(INST.means,
                            measurevar = "grips",
                            withinvars = c("trial", "session"),
                            betweenvars = "intervention",
                            idvar = "id")

# NOTE(review): as.numeric() on a factor returns level codes; fine only if the
# trial levels are ordered 1..24 -- confirm
dfTRIALg$trial       <- as.numeric(dfTRIALg$trial)

# Model prediction (line + ribbon) over the raw group means (points), by session
pp <-  ggplot(dfTRIAL, aes(x =trial, y = grips)) +
  geom_point(data = dfTRIALg, aes(shape = intervention), alpha = 0.3, color = 'black') +
  geom_line(data = dfTRIAL, size =1, color = pal[4]) +
  # NOTE(review): stray trailing comma in geom_ribbon() below -- harmless here
  # but worth removing
  geom_ribbon(aes(ymin=grips-se__, ymax=grips+se__), fill = pal[4], alpha = 0.3, )+
  ylab('Number of Grips')+
  xlab('Trial') +
  scale_y_continuous(expand = c(0, 0),  limits = c(9.5,16.05),  breaks=c(seq.int(9,16, by = 1))) +
  scale_x_continuous(expand = c(0, 0),  limits = c(0,25),  breaks=c(seq.int(1,25, by = 2))) +
  scale_shape_manual(name="Group", labels=c("Placebo", "Liraglutide"), values = c(1, 2, 18)) +
  theme_bw() +
  facet_wrap(~session, labeller=labeller(session = labels))

# HTML-report version of the figure
pp + html_theme + theme(legend.position=c(.9,.88),axis.text.x = element_text(size = 14))
Number of grips over trials by session.

Number of grips over trials by session.

# Publication-theme version of the same figure
plt = pp + averaged_theme + theme(legend.position=c(.9,.88),axis.text.x = element_text(size = 14))



# Write the figure to PDF (cairo for proper font embedding)
cairo_pdf('figures/Figure_INST_trial_LIRA.pdf')
print(plt)
dev.off()
## png 
##   2

Pavlovian-Instrumental Transfer (PIT) Task

Mobilized effort = Area under the curve of the force exerted exceeding the delivery threshold during Pavlovian cue presentation

# PIT model: mobilized effort (AUC) ~ 3-way condition x intervention x session
# plus covariates, with by-subject condition*session slopes and a random
# intercept per trial-within-condition
mf = formula(AUC ~ condition*intervention*session + age + gender + BMI_V1  + hungry + GLP_diff + reelin_diff + (condition*session|id) + (1|trialxcondition))

# -------------------------------------- Bayesian Regression Models using Stan -------------------------------------
bmod_full = brm(bf(mf, hu ~ 1), family = hurdle_gaussian, stanvars = stanvars, data=PIT.clean, prior =  c(prior(normal(0, 3), class = "b", coef = ""), prior(normal(0, 100), class = "Intercept", coef = ""), prior(logistic(0, 0.5), class = "Intercept", dpar = "hu")), sample_prior = T, save_pars = save_pars(all = TRUE), chains = chains,  iter = niter, warmup = warm, seed = 123, inits = 1, backend = 'cmdstan', threads = threading(4)) # a lot to unwind here.. 1) custom gaussian hurdle cause zero-inflated continous data more details here: https://cran.r-project.org/web/packages/brms/vignettes/brms_customfamilies.html 2) Generic weakly informative prior around 0 for fixed effects and very weak prior for the intercept 3) we need to sample priors and save parameters for computing BF 

#lmer to compare
fmod_full = lmer(mf , data = PIT.clean, REML=F, control = control)
## plot population-level effects posterior distributions and chain sampling
param = mcmc_plot(object = bmod_full,  pars =c("b_.*o", "b_.*y", "b_.*ge", "b_.*diff", "b_.*V1"), type ="areas") 

trace = mcmc_plot(object = bmod_full,  pars =c("b_.*o", "b_.*y", "b_.*ge", "b_.*diff", "b_.*V1"), type ="trace")


#check assumptions
# Grouped posterior-predictive median by intervention group
var_group = pp_check(bmod_full, type = "stat_grouped", stat = "median", group = "intervention", binwidth = 1, nsamples = NULL) #equality of variance between groups

rep_fit = pp_check(bmod_full, nsamples = 10) # check response fit 

error = pp_check(bmod_full, type ="error_scatter_avg", nsamples = NULL) # check good alignment between model and data, and no obvious pattern to the types of errors we are getting.


#Normality of errors
# NOTE(review): `residuals` again shadows stats::residuals()
residuals <-residuals(bmod_full)[, 1]; res <- qqnorm(residuals, pch = 1, plot.it=F)

lmx = plot_model(fmod_full, type = "diag"); #diagnostic plots for lmer

#plot all
diagPIT <- ggarrange(param, var_group, rep_fit, error, ncol = 2, nrow = 2)

annotate_figure(diagPIT, top = text_grob("Diagnostic Plots", face = "bold", size = 10))

# check response fit -> the hurdle model captures the bimodal distribution better
# residuals could be better, but far better than a plain gaussian (see lmx[1])
# Posterior summary: median + MAD, 90% HDI, pd, Savage-Dickey BF and Rhat
full_tab = describe_posterior(bmod_full,
                   estimate = "median", dispersion = T,
                   ci = .9, ci_method = "hdi",
                   bf_prior = bmod_full, diagnostic = "Rhat",  
                   test = c("p_direction", "bf"))



# Drop intercepts (gaussian and hurdle parts) and sampled priors from the report
full_tab = filter(full_tab, Parameter %notin% c("b_Intercept", "b_hu_Intercept", "prior_sigma"))

# -------------------------------------- FREQUENTIST STATS: LRT + Bootstrap  -----------------------------------------------

# fmod_cond = update(fmod_full,  ~ .-condition) #evaluate condition

# # p-value from bootstrap distribution
# LRT_cond = PBmodcomp(fmod_full, fmod_cond, nsim = 500, seed = 123, cl=cores) 

# -------------------------------------- Regression table summary --------------------------------------

# HTML regression table for the PIT model; the hurdle intercept is removed
tab_model(bmod_full,
          show.p = FALSE, show.intercept = FALSE, show.se = TRUE,
          title = "", show.re.var = FALSE, digits = 3,
          dv.labels = "Mobilized effort (a.u.)", file = "tmp/temp5.html",
          transform = NULL, rm.terms = "hu_Intercept")
  Mobilized effort (a.u.)
Predictors Estimates std. Error CI (95%)
condition 3.346 2.202 -0.780 – 7.607
intervention 0.668 2.698 -4.563 – 5.855
session 0.070 2.671 -5.243 – 5.278
age 1.564 2.587 -4.313 – 7.410
gender 0.107 2.801 -5.224 – 5.339
BMI V 1 0.289 3.037 -5.937 – 5.781
hungry -0.247 2.886 -6.113 – 5.200
GLP diff 0.658 2.792 -4.597 – 6.692
reelin diff 0.634 2.846 -5.047 – 5.965
condition.intervention 0.770 2.056 -3.099 – 4.945
condition.session -0.192 1.754 -3.539 – 3.360
intervention.session -1.165 2.729 -6.509 – 4.362
condition.intervention.session 0.867 1.702 -2.505 – 4.188
ICC 0.69
N id 50
N trialxcondition 15
Observations 2760
Marginal R2 / Conditional R2 0.005 / 0.709
# Capture the sexit() effect-size sentences for selective printing below
report = capture.output(sexit(bmod_full, ci=.9))
# Methods-style sentence describing the PIT model
print(paste("Bayesian general linear mixed model (hurdle gaussian family with a identity link) (estimated using MCMC sampling with" ,chains ," chains of", niter, " iterations and a warmup of", warm, ") to predict Mobilized Effort (AUC) with condition, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: AUC ~ condition * intervention * session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff). The model included condition, session, id and trialxcondition as random effects (formula: list(~condition * session | id, ~1 | trialxcondition)). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 130.20) distributions"))
## [1] "Bayesian general linear mixed model (hurdle gaussian family with a identity link) (estimated using MCMC sampling with 4  chains of 500  iterations and a warmup of 100 ) to predict Mobilized Effort (AUC) with condition, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: AUC ~ condition * intervention * session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff). The model included condition, session, id and trialxcondition as random effects (formula: list(~condition * session | id, ~1 | trialxcondition)). Priors over parameters were set as normal (mean = 0.00, SD = 3.00), and student_t (location = 0.00, scale = 130.20) distributions"
full_tab
## Summary of Posterior Distribution
## 
## Parameter                      | Median |  MAD |        90% CI |     pd |  Rhat |    BF
## ---------------------------------------------------------------------------------------
## condition                      |   3.35 | 2.20 | [-0.33, 6.60] | 93.62% | 1.010 |  1.94
## intervention                   |   0.67 | 2.70 | [-3.83, 4.88] | 60.88% | 1.026 | 0.991
## session                        |   0.07 | 2.67 | [-4.72, 3.98] | 50.88% | 1.012 | 0.885
## age                            |   1.56 | 2.59 | [-3.14, 6.23] | 73.19% | 1.006 |  1.02
## gender                         |   0.11 | 2.80 | [-5.00, 4.06] | 51.69% | 1.000 | 0.916
## BMI_V1                         |   0.29 | 3.04 | [-4.78, 5.00] | 54.31% | 1.004 |  1.11
## hungry                         |  -0.25 | 2.89 | [-5.06, 4.19] | 53.50% | 1.012 | 0.987
## GLP_diff                       |   0.66 | 2.79 | [-3.31, 5.97] | 60.56% | 1.003 | 0.947
## reelin_diff                    |   0.63 | 2.85 | [-4.23, 5.10] | 59.38% | 1.010 | 0.895
## condition:intervention         |   0.77 | 2.06 | [-2.63, 4.18] | 65.06% | 1.001 | 0.710
## condition:session              |  -0.19 | 1.75 | [-3.23, 2.65] | 53.62% | 1.010 | 0.537
## intervention:session           |  -1.16 | 2.73 | [-5.63, 3.51] | 66.56% | 1.031 | 0.975
## condition:intervention:session |   0.87 | 1.70 | [-1.92, 3.74] | 70.19% | 1.003 | 0.674
report[5]
## [1] "- b_condition (Median = 3.35, 90% CI [-0.33, 6.60]) has a 93.62% probability of being positive (> 0), 93.44% of being significant (> 0.05), and 91.44% of being large (> 0.30)"
# Re-read the HTML table written by tab_model() to extract the model-summary
# rows (ICC, N id, N trialxcondition, Observations, R2) below the coefficients
tables <- list.clean(readHTMLTable("tmp/temp5.html"), fun = is.null, recursive = FALSE)
tables2 = tables[[1]] %>% janitor::row_to_names(row_number = 1)
tables2 <- as.matrix(tables2) %>% as_tibble(); 


# NOTE(review): row selection keyed to length(full_tab$Parameter) -- fragile
tables2[is.na(tables2)] <- "";  names(tables2) <- NULL; tmp = t(tables2[(length(full_tab$Parameter)+1):(length(full_tab$Parameter)+5),1:2]);
# Keep only the conditional R2 (text after "/") and rename the header
tmp[,5][1] = "R2"; tmp[,5][2] = gsub(".*/","",tmp[,5][2])
pander::pander(tmp)
## 
## ------ ------ ------------------- -------------- -------
##  ICC    N id   N trialxcondition   Observations    R2   
## 
##  0.70    50           15               2760       0.709 
## ------ ------ ------------------- -------------- -------

Plot

# plot HDI
# Extract the posterior draws of the condition effect into a tidy data frame
dfdraws = bmod_full %>%
    spread_draws(`b_condition` )

# Histogram of the draws with the 90% highest-density interval marked
HDI_PIT = plotHDI( dfdraws$`b_condition` , credMass = .90, binSize = 100, Title = "") + theme_bw()


#plot estimated means from posterior distribution from the model draws


# Posterior draws of the estimated marginal means per condition
dfdraws2 =  bmod_full %>%
     emmeans(~ condition) %>%
     gather_emmeans_draws() 

# CSp = subset(dfdraws2, condition ==1); CSm = subset(dfdraws2, condition ==-1); diff= CSp; diff$.value = CSp$.value - CSm$.value


# Half-eye plot: density slab + point interval (50% and 90%) per condition
pp = dfdraws2 %>% #diff
    ggplot(aes(x = as.factor(condition) , y = .value,  fill = as.factor(condition))) +
    #geom_abline(slope= 0, intercept=0, linetype = "dashed", color = "black") +
    #geom_point(position = "jitter") +
    stat_slab(.width = c(0.50, 0.9), position="dodge", alpha=0.5) +
    stat_pointinterval(.width = c(0.50, 0.9),position="dodge") +
    ylab('Mobilized effort')+
    xlab('')+
    scale_fill_manual(values=c("1" = pal[6],"-1"=pal[1]), guide="none") +
    scale_color_manual( values=c("1" = pal[6],"-1"=pal[1]), guide="none") +
    scale_x_discrete(labels=c("CS-", "CS+")) +
    theme_bw()

# Themed variants for the HTML report and for the paper figure
plt_bayes_html = pp + html_theme 
plt_bayes = pp + averaged_theme 


# Combine posterior-means panel (A) with the HDI panel (B)
figurePIT <- ggarrange(plt_bayes_html, HDI_PIT,
                    labels = c("A", "B"),
                    ncol = 2)
#figurePIT


#### Plot PIT

# Within-subject means/SEs per condition and session for the crossbars
dfL <- summarySEwithin(PIT.means,
                       measurevar = "AUC",
                       withinvars = c("condition","session"), 
                       idvar = "id")

# Map condition "1" (CS+) to x = -0.25 and the other level to +0.25, then add
# horizontal jitter per subject
dfL$cond <- ifelse(dfL$condition == "1", -0.25, 0.25)
PIT.means$cond <- ifelse(PIT.means$condition == "1", -0.25, 0.25)
PIT.means <- PIT.means %>% mutate(condjit = jitter(as.numeric(cond), 0.3),grouping = interaction(id, cond))


# AUC
# Raincloud-style plot: subject lines + jittered points + half violins +
# mean +/- SE crossbars, faceted by session
pp <- ggplot(PIT.means, aes(x = cond, y = AUC,
                            fill = as.factor(condition), color = as.factor(condition))) +
  geom_line(aes(x = condjit, group = id, y = AUC), alpha = .3, size = 0.5, color = 'gray' ) +
  geom_flat_violin(scale = "count", trim = FALSE, alpha = .2, aes(fill = as.factor(condition), color = NA)) +
  geom_point(aes(x = condjit, shape = as.factor(intervention)), alpha = .3) +
  geom_crossbar(data = dfL, aes(y = AUC, ymin=AUC-se, ymax=AUC+se), width = 0.2 , alpha = 0.1)+
  ylab('Mobilized effort (a.u.)')+
  xlab('Condition')+
  #scale_y_continuous(expand = c(0, 0), breaks = c(seq.int(0,30, by = 5)), limits = c(-0.05,30.5)) +
  scale_x_continuous(labels=c("CS+", "CS-"),breaks = c(-.25,.25), limits = c(-.5,.5)) +
  scale_fill_manual(values=c("1"= pal[6], "-1"=  pal[1]), guide = 'none') +
  scale_color_manual(values=c("1"= pal[6], "-1"=  pal[1]), guide = 'none') +
  scale_shape_manual(name="Intervention", labels=c("Placebo", "Liraglutide"), values = c(1, 2)) +
  theme_bw()+ facet_wrap(~session, labeller=labeller(session = labels))



pp + html_theme +theme(legend.position=c(.94,.94))
A) Posterior distribution by group. B) Highest density interval (90% HDI) for the interaction condition by session

A) Posterior distribution by group. B) Highest density interval (90% HDI) for the interaction condition by session

# NOTE(review): this `plt` is overwritten at the overtime plot below before it
# is ever saved to PDF -- confirm the averaged raincloud figure is not needed
plt = pp + averaged_theme +theme(legend.position=c(.94,.94))
# PLOT OVERTIME


# Mobilized effort over trials: group means (points from PIT.group) with
# lines + SE ribbons per condition, faceted by session
pp <- ggplot(PIT.p, aes(x = as.numeric(trial), y = AUC,
                        color = condition,
                        fill  = condition))+
    geom_point(data = PIT.group, aes(shape = intervention, color = condition), alpha = 0.3) +
  geom_line(alpha = .5, size = 1, show.legend = F) +
  geom_ribbon(aes(ymax = AUC + se, ymin = AUC - se),  alpha=0.4) +
  geom_point() +
  ylab('Mobilized effort (a.u.)')+
  xlab('Trial')+
  scale_color_manual(labels = c('-1'= 'CS-', "1" = 'CS+'), name="",
                     values = c("1"= pal[6], '-1'= pal[1])) +
  scale_fill_manual(labels = c('-1'= 'CS-', "1" = 'CS+'), name="",
                    values = c("1"= pal[6], '-1'= pal[1])) +
  scale_y_continuous(expand = c(0, 0),  limits = c(50,200),  breaks=c(seq.int(50,200, by = 50))) +
  scale_x_continuous(expand = c(0, 0),  limits = c(0,15),  breaks=c(seq.int(1,15, by = 2))) +
    scale_shape_manual(name="Group", labels=c("Placebo", "Liraglutide"), values = c(1, 2, 18)) +
  theme_bw() +
  facet_wrap(~session, labeller=labeller(session =labels))


pp + html_theme + theme(strip.background = element_rect(fill="white"), legend.key.size = unit(0.3, "cm"))
Mobilized effort for each condition and group over trials.

Mobilized effort for each condition and group over trials.

plt = pp + averaged_theme + theme(strip.background = element_rect(fill="white"), legend.key.size = unit(0.8, "cm"), axis.text.x = element_text(size = 16))



# bmod_time = update(bmod_full,  ~ .+as.factor(trialxcondition)) #evaluate interaction
# 
# df =  bmod_time %>%
#      emmeans(~ condition:session:trialxcondition) 
# 
# 
# 
# bayes_plt = as_tibble(df) %>%
#     ggplot(aes(x = trialxcondition, y = emmean,  fill =as.factor(condition))) +
#     #geom_point(position = "jitter") +
#     geom_smooth(method=loess, se = T,inherit.aes = T) + 
#     #stat_slab(.width = c(0.50, 0.95),position="dodge", alpha=0.5) +
#     #stat_pointinterval(.width = c(0.50, 0.95),position="dodge") +
#     ylab('Mobilized effort (a.u.) \n Baseline corrected')+
#     xlab('')+
#     scale_fill_manual(values=c("1" = pal[2],"-1"=pal[1]), guide="none") +
#     scale_color_manual( values=c("1" = pal[2],"-1"=pal[1]), guide="none") +
#     #scale_x_discrete(labels=c("CS+", "CS-")) +
#     theme_bw() + facet_wrap(~session, labeller=labeller(session =labels))


# Write the overtime figure and the Bayesian panel figure to PDF
cairo_pdf('figures/Figure_PIT_Lira.pdf')
print(plt)
dev.off()
## png 
##   2
cairo_pdf('figures/Figure_PIT_Lira_Bayes.pdf')
print(figurePIT)
dev.off()
## png 
##   2

Hedonic Reactivity Test

Perceived liking = how pleasant the liquid solution is rated (0-100, with repetitions) & condition = Milkshake or Tasteless & intensity = difference in how intense the liquid solutions were rated (mean(Milkshake) - mean(Tasteless)) & familiarity = difference in how familiar the liquid solutions were rated (mean(Milkshake) - mean(Tasteless))

# Hedonic model: perceived liking ~ condition x intervention x session plus
# covariates and the intensity/familiarity difference scores (int, fam)
mf = formula(perceived_liking ~ condition*intervention*session + age + gender + BMI_V1  + hungry + GLP_diff + reelin_diff + int + fam + (condition*session|id) + (1|trialxcondition))

# -------------------------------------- Bayesian Regression Models using Stan -------------------------------------
bmod_full = brm(bf(mf, hu ~ 1), family = hurdle_gaussian, stanvars = stanvars, data=HED, prior =  c(prior(normal(0, 3), class = "b", coef = ""), prior(normal(0, 100), class = "Intercept", coef = ""), prior(logistic(0, 0.5), class = "Intercept", dpar = "hu")), sample_prior = T, save_pars = save_pars(all = TRUE), chains = chains,  iter = niter, warmup = warm, seed = 123, inits = 1, backend = 'cmdstan', threads = threading(4)) # 1) custom hurdle-gaussian family because the outcome is zero-inflated continuous data 2) generic weakly informative prior around 0 for fixed effects and a very weak prior for the intercept 3) priors are sampled and all parameters saved to allow computing Bayes factors. This model takes a bit longer, so sit tight
## Running MCMC with 4 parallel chains, with 4 thread(s) per chain...
## 
## Chain 1 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 2 WARNING: There aren't enough warmup iterations to fit the 
## Chain 2          three stages of adaptation as currently configured. 
## Chain 2          Reducing each adaptation stage to 15%/75%/10% of 
## Chain 2          the given number of warmup iterations: 
## Chain 2            init_buffer = 15 
## Chain 2            adapt_window = 75 
## Chain 2            term_buffer = 10 
## Chain 2 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 3 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 4 Iteration:   1 / 500 [  0%]  (Warmup) 
## Chain 4 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 2 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 4 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 2 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 3 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 3 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 1 Iteration: 100 / 500 [ 20%]  (Warmup) 
## Chain 1 Iteration: 101 / 500 [ 20%]  (Sampling) 
## Chain 2 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 4 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 3 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 1 Iteration: 200 / 500 [ 40%]  (Sampling) 
## Chain 2 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 3 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 1 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 4 Iteration: 300 / 500 [ 60%]  (Sampling) 
## Chain 2 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 3 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 1 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 4 Iteration: 400 / 500 [ 80%]  (Sampling) 
## Chain 3 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 3 finished in 154.5 seconds.
## Chain 2 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 2 finished in 156.0 seconds.
## Chain 1 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 1 finished in 157.6 seconds.
## Chain 4 Iteration: 500 / 500 [100%]  (Sampling) 
## Chain 4 finished in 158.7 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 156.7 seconds.
## Total execution time: 159.1 seconds.
#lmer to compare
fmod_full = lmer(mf , data = HED, REML=F, control = control)
## plot population-level effects posterior distributions and chain sampling
param = mcmc_plot(object = bmod_full,  pars =c("b_.*o", "b_.*y", "b_.*ge", "b_.*diff", "b_.*V1", "b_int", "b_fam"), type ="areas")  # int and fam

trace = mcmc_plot(object = bmod_full,  pars =c("b_.*o", "b_.*y", "b_.*ge", "b_.*diff", "b_.*V1", "b_int", "b_fam"), type ="trace")


#check assumptions
# Grouped posterior-predictive median by intervention group
var_group = pp_check(bmod_full, type = "stat_grouped", stat = "median", group = "intervention", binwidth = 0.1, nsamples = NULL) #equality of variance between groups

rep_fit = pp_check(bmod_full, nsamples = 10) # check response fit 

error = pp_check(bmod_full, type ="error_scatter_avg", nsamples = NULL) # check good alignment between model and data, and no obvious pattern to the types of errors we are getting.


#Normality of errors
# NOTE(review): `residuals` again shadows stats::residuals()
residuals <-residuals(bmod_full)[, 1]; res <- qqnorm(residuals, pch = 1, plot.it=F)

lmx = plot_model(fmod_full, type = "diag"); #diagnostic plots for lmer

#plot all
# NOTE(review): the grid is stored as diagPIT although this is the HED model
diagPIT <- ggarrange(param, var_group, rep_fit, error, ncol = 2, nrow = 2)

annotate_figure(diagPIT, top = text_grob("Diagnostic Plots", face = "bold", size = 10))

# check response fit -> the hurdle model captures the (at least bimodal) distribution better
# residuals could be better, but far better than a plain gaussian (see lmx[1])
# Posterior summary: median + MAD, 90% HDI, pd, Savage-Dickey BF and Rhat
full_tab = describe_posterior(bmod_full,
                   estimate = "median", dispersion = T,
                   ci = .9, ci_method = "hdi",
                   bf_prior = bmod_full, diagnostic = "Rhat",  
                   test = c("p_direction", "bf"))



# Drop intercepts (gaussian and hurdle parts) and sampled priors from the report
full_tab = filter(full_tab, Parameter %notin% c("b_Intercept", "b_hu_Intercept", "prior_sigma"))


#contrasts 
# emmip(bmod_full, condition~intervention) to visualize
# 
# ems = emmeans(bmod_full, ~condition|intervention)
# con_inter = contrast(ems, interaction = "pairwise", by = NULL)
# 
# 
# inter_tab = describe_posterior(con_inter,
#                    estimate = "median", dispersion = TRUE,
#                    ci = .9, ci_method = "hdi",
#                    bf_prior = bmod_full, diagnostic = "Rhat",  
#                    test = c("p_direction", "bf"))



# -------------------------------------- FREQUENTIST STATS: LRT + Bootstrap  -----------------------------------------------

# fmod_cond = update(fmod_full,  ~ .-condition) #evaluate condition

# # p-value from bootstrap distribution
# LRT_cond = PBmodcomp(fmod_full, fmod_cond, nsim = 500, seed = 123, cl=cores) 

# -------------------------------------- Regression table summary --------------------------------------

tab_model(bmod_full, show.p = F,show.intercept = F, show.se = T, title ="", show.re.var = F, digits = 3, dv.labels = "Perceived liking", file = "tmp/temp6.html", transform = NULL,  rm.terms = "hu_Intercept")
  Perceived liking
Predictors Estimates std. Error CI (95%)
condition 12.920 1.567 9.242 – 15.972
intervention 1.408 1.608 -1.812 – 4.784
session 1.237 0.975 -0.678 – 3.144
age -0.484 1.499 -3.448 – 2.758
gender -2.150 1.658 -5.437 – 1.080
BMI V 1 2.054 1.580 -1.394 – 5.360
hungry 1.382 1.309 -1.142 – 3.947
GLP diff -0.441 1.839 -4.151 – 3.414
reelin diff -2.309 1.992 -6.351 – 1.634
int 0.825 2.260 -4.039 – 5.227
fam 0.781 1.583 -2.402 – 3.996
condition.intervention 2.418 1.409 -0.908 – 5.231
condition.session -0.681 0.875 -2.478 – 0.980
intervention.session 0.128 0.861 -1.603 – 1.902
condition.intervention.session 0.594 0.862 -1.190 – 2.256
ICC 0.47
N id 50
N trialxcondition 20
Observations 3680
Marginal R2 / Conditional R2 0.279 / 0.799
# Capture the sexit() effect descriptions line-by-line so individual sentences
# can be quoted later (e.g. report[5] prints the condition-effect sentence)
report = capture.output(sexit(bmod_full, ci=.9))

# Methods-style description of the model. paste0 with explicit spacing fixes
# the artifacts the previous paste(..., sep = " ") produced in the rendered
# output ("4  chains", "500  iterations", "100 )"); also "an identity link".
print(paste0("Bayesian general linear mixed model (hurdle gaussian family with an identity link) (estimated using MCMC sampling with ", chains, " chains of ", niter, " iterations and a warmup of ", warm, ") to predict Perceived Liking with condition, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: perceived_liking ~ condition * intervention * session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff + int + fam). The model included condition, session, id and trialxcondition as random effects (formula: list(~condition | id, ~1 | trialxcondition)). Priors over parameters were set as normal (mean = 0.00, SD = 3.00) and student_t (location = 0.00, scale = 31.7) distributions for beta and sd respectively"))
## [1] "Bayesian general linear mixed model (hurdle gaussian family with a identity link) (estimated using MCMC sampling with 4  chains of 500  iterations and a warmup of 100 ) to predict Perceived Liking with condition, intervention, session, age, gender, BMI_V1, hungry, GLP_diff and reelin_diff (formula: perceived_liking ~ condition * intervention * session + age + gender + BMI_V1 + hungry + GLP_diff + reelin_diff + int + fam). The model included condition, session, id and trialxcondition as random effects (formula: list(~condition | id, ~1 | trialxcondition)). Priors over parameters were set as normal (mean = 0.00, SD = 3.00) and student_t (location = 0.00, scale = 31.7) distributions for beta and sd respectively"
# Print the filtered posterior summary table for all fixed effects
full_tab
## Summary of Posterior Distribution
## 
## Parameter                      | Median |  MAD |         90% CI |     pd |  Rhat |     BF
## -----------------------------------------------------------------------------------------
## condition                      |  12.92 | 1.57 | [ 9.74, 15.33] |   100% | 1.013 | > 1000
## intervention                   |   1.41 | 1.61 | [-1.20,  4.25] | 81.12% | 1.001 |  0.822
## session                        |   1.24 | 0.98 | [-0.37,  2.84] | 88.69% | 1.001 |  0.630
## age                            |  -0.48 | 1.50 | [-2.95,  2.17] | 61.94% | 1.001 |  0.552
## gender                         |  -2.15 | 1.66 | [-4.90,  0.53] | 91.06% | 1.001 |   1.37
## BMI_V1                         |   2.05 | 1.58 | [-0.64,  4.97] | 88.69% | 1.005 |   1.30
## hungry                         |   1.38 | 1.31 | [-0.71,  3.47] | 85.69% | 1.005 |  0.717
## GLP_diff                       |  -0.44 | 1.84 | [-3.32,  2.83] | 59.88% | 1.009 |  0.689
## reelin_diff                    |  -2.31 | 1.99 | [-5.71,  0.83] | 87.44% | 1.011 |   1.27
## int                            |   0.82 | 2.26 | [-2.93,  4.45] | 64.31% | 1.006 |  0.822
## fam                            |   0.78 | 1.58 | [-1.86,  3.35] | 69.56% | 0.999 |  0.611
## condition:intervention         |   2.42 | 1.41 | [ 0.00,  4.92] | 93.69% | 1.000 |   2.18
## condition:session              |  -0.68 | 0.88 | [-2.11,  0.76] | 77.62% | 1.003 |  0.424
## intervention:session           |   0.13 | 0.86 | [-1.23,  1.66] | 56.62% | 1.000 |  0.294
## condition:intervention:session |   0.59 | 0.86 | [-0.71,  2.09] | 75.31% | 1.005 |  0.388
# Quote the 5th captured sexit() line: the description of the condition effect
report[5]
## [1] "- b_condition (Median = 12.92, 90% CI [9.74, 15.33]) has a 100.00% probability of being positive (> 0), 100.00% of being significant (> 0.05), and 100.00% of being large (> 0.30)"
# Re-read the HTML table written by tab_model() and condense the model-summary
# rows (ICC, N id, N trialxcondition, Observations, R2) into a single row.
tables <- list.clean(readHTMLTable("tmp/temp6.html"), fun = is.null, recursive = FALSE)
# First table in the HTML file; promote its first row to column names
tables2 = tables[[1]] %>% janitor::row_to_names(row_number = 1)
tables2 <- as.matrix(tables2) %>% as_tibble(); 

# Blank out NAs, drop names, then transpose the 5 summary rows that sit right
# after the coefficient rows (rows length(full_tab$Parameter)+1 .. +5, cols 1:2)
tables2[is.na(tables2)] <- "";  names(tables2) <- NULL; tmp = t(tables2[(length(full_tab$Parameter)+1):(length(full_tab$Parameter)+5),1:2]);
# Relabel the "Marginal R2 / Conditional R2" cell: keep only the value after
# the slash (conditional R2) and shorten the header to "R2"
tmp[,5][1] = "R2"; tmp[,5][2] = gsub(".*/","",tmp[,5][2])
pander::pander(tmp)
## 
## ------ ------ ------------------- -------------- -------
##  ICC    N id   N trialxcondition   Observations    R2   
## 
##  0.47    50           20               3680       0.798 
## ------ ------ ------------------- -------------- -------

Plot

# Pull the posterior draws of the condition slope out of the fitted model,
# then plot its 90% highest-density interval.
dfdraws <- spread_draws(bmod_full, b_condition)

HDI_HED <- plotHDI(dfdraws$b_condition,
                   credMass = .90, binSize = 100, Title = "") +
  theme_bw()


# Estimated marginal means per condition from the posterior, one draw per row,
# ready for slab/interval plotting below.
dfdraws2 <- gather_emmeans_draws(emmeans(bmod_full, ~ condition))


# Posterior distributions of the estimated means per condition: half-eye slabs
# (50% / 90% widths) layered over point-intervals, one colour per condition.
cond_pal <- c("-1" = pal[1], "1" = pal[3])

pp <- ggplot(dfdraws2,
             aes(x = as.factor(condition), y = .value,
                 fill = as.factor(condition))) +
  stat_slab(.width = c(0.50, 0.9), position = "dodge", alpha = 0.5) +
  stat_pointinterval(.width = c(0.50, 0.9), position = "dodge") +
  labs(x = "", y = "Perceived liking", title = "") +
  scale_fill_manual(values = cond_pal, guide = "none") +
  scale_color_manual(values = cond_pal, guide = "none") +
  scale_x_discrete(labels = c("Tasteless", "Milkshake")) +
  scale_y_continuous(expand = c(0, 0),
                     breaks = seq.int(0, 100, by = 20),
                     limits = c(-0.5, 100.5)) +
  theme_bw()

# Theme variants: one for the HTML report, one for the print/averaged figure
plt_bayes_html <- pp + html_theme
plt_bayes <- pp + averaged_theme

# Two-panel figure: A = posterior by group, B = HDI of the condition effect
figureHED <- ggarrange(plt_bayes_html, HDI_HED,
                       labels = c("A", "B"),
                       ncol = 2)
#figureHED


# AVERAGED EFFECT
# Within-subject standard errors of perceived liking by condition and session
dfH <- summarySEwithin(HED.means,
                       measurevar = "perceived_liking",
                       withinvars = c("condition", "session"),
                       idvar = "id")

# Numeric x positions for plotting: condition "1" at -0.25, otherwise 0.25
dfH$cond       <- ifelse(dfH$condition == "1", -0.25, 0.25)
HED.means$cond <- ifelse(HED.means$condition == "1", -0.25, 0.25)

# Jitter x per subject so individual trajectories stay distinguishable, and
# build a subject-by-condition grouping factor
HED.means <- HED.means %>%
  mutate(condjit  = jitter(as.numeric(cond), 0.3),
         grouping = interaction(id, cond))


# Raincloud-style averaged plot: subject points and lines (jittered x), flat
# violins per condition, group means and SE crossbars from dfH, faceted by
# session. Fix: removed the stray trailing comma in geom_point(..., alpha = .3,)
# which left an empty argument in the call.
pp <- ggplot(HED.means, aes(x = cond, y = perceived_liking,
                            fill = as.factor(condition), color = as.factor(condition))) +
  # group means from the within-subject summary
  geom_point(data = dfH, alpha = 0.5) +
  # per-subject lines across conditions
  geom_line(aes(x = condjit, group = id, y = perceived_liking), alpha = .3, size = 0.5, color = 'gray') +
  # NOTE(review): color = NA inside aes() maps a constant — probably meant as a
  # fixed parameter outside aes(); left as-is to preserve the rendered figure
  geom_flat_violin(scale = "count", trim = FALSE, alpha = .2, aes(fill = as.factor(condition), color = NA))+
  geom_point(aes(x = condjit, shape = as.factor(intervention)), alpha = .3) +
  geom_crossbar(data = dfH, aes(y = perceived_liking, ymin=perceived_liking-se, ymax=perceived_liking+se), width = 0.2 , alpha = 0.1)+
  ylab('Perceived liking') +
  xlab('Taste') +
  scale_y_continuous(expand = c(0, 0), breaks = c(seq.int(0,100, by = 20)), limits = c(-0.5,100.5)) +
  scale_x_continuous(labels=c("Pleasant", "Neutral"),breaks = c(-.25,.25), limits = c(-.5,.5)) +
  scale_fill_manual(values=c("1"= pal[3], "-1"=pal[1]), guide = 'none') +
  scale_color_manual(values=c("1"=pal[3], "-1"=pal[1]), guide = 'none') +
  scale_shape_manual(name="Intervention", labels=c("Placebo", "Liraglutide"), values = c(1, 2)) +
  theme_bw()+ facet_wrap(~session, labeller=labeller(session = labels))


# Render the HTML-themed raincloud plot, legend pinned to the top-right corner
pp + html_theme +theme(legend.position=c(.96,.94))
A) Posterior distribution by group. B) Highest density interval (90% HDI) for the interaction condition by session

  A) Posterior distribution by group. B) Highest density interval (90% HDI) for the interaction condition by session

# Averaged-theme version of the raincloud figure (exported to PDF later)
plt <- pp +
  averaged_theme +
  theme(legend.position = c(.96, .94))
# PLOT OVERTIME
# Trial-by-trial perceived liking: group means with SE ribbons (HED.p) plus
# intervention-shaped points (HED.group), coloured by condition, faceted by
# session. Layer order determines draw order, so it is kept as-is.

pp <- ggplot(HED.p, aes(x = as.numeric(trial), y = perceived_liking,
                        color = condition,
                        fill  = condition))+
    # raw group points, shaped by intervention
    geom_point(data = HED.group, aes(shape = intervention, color = condition), alpha = 0.3) +
  geom_line(alpha = .5, size = 1, show.legend = F) +
  # ribbon spans +/- se around the mean (se column expected in HED.p)
  geom_ribbon(aes(ymax = perceived_liking + se, ymin = perceived_liking - se),  alpha=0.4) +
  geom_point() +
  ylab('Perceived Liking')+
  xlab('Trial')+
  scale_color_manual(labels = c('Pleasant', 'Neutral'), name = "",
                     values = c( "1" =pal[3], '-1' =pal[1])) +
  scale_fill_manual(labels = c('Pleasant', 'Neutral'), name = "",
                    values = c( "1" =pal[3], '-1'=pal[1])) +
  scale_y_continuous(expand = c(0, 0),  limits = c(0,100),  breaks=c(seq.int(0,100, by = 20))) +
    scale_x_continuous(expand = c(0, 0),  limits = c(0,21),  breaks=c(seq.int(1,21, by = 2))) +
  # force the legend swatches to the condition colours
  guides(color=guide_legend(override.aes=list(fill=c(pal[3], pal[1]), color=c(pal[3], pal[1]))))+
    # NOTE(review): 3 shape values (1, 2, 18) but only 2 labels — confirm
    # whether a third intervention level was intended
    scale_shape_manual(name="Group", labels=c("Placebo", "Liraglutide"), values = c(1, 2, 18)) +
  theme_bw() +
  facet_wrap(~session, labeller=labeller(session =labels))


# Render the HTML-themed over-time plot with a right-aligned, top-justified legend
pp + html_theme + theme(strip.background = element_rect(fill="white"), legend.justification = "top", legend.position="right", axis.text.x = element_text(size = 14))
Perceived liking for each condition and group over trials.

Perceived liking for each condition and group over trials.

# Averaged-theme version of the over-time plot
plt <- pp +
  averaged_theme +
  theme(strip.background = element_rect(fill = "white"),
        axis.text.x = element_text(size = 14),
        legend.justification = "top",
        legend.position = "right")



##We see here the base difference between Placebo VS Liraglutide BUT it's the same for both sessions so .. no effect of the treatment really XXXX

# bmod_time = update(bmod_full,  ~ .+as.factor(trialxcondition)) #evaluate interaction
# 
# df =  bmod_time %>%
#      emmeans(~ condition:session:trialxcondition) 
# 
# 
# 
# bayes_plt = as_tibble(df) %>%
#     ggplot(aes(x = trialxcondition, y = emmean,  fill =as.factor(condition))) +
#     #geom_point(position = "jitter") +
#     geom_smooth(method=loess, se = T,inherit.aes = T) + 
#     #stat_slab(.width = c(0.50, 0.95),position="dodge", alpha=0.5) +
#     #stat_pointinterval(.width = c(0.50, 0.95),position="dodge") +
#     ylab('Mobilized effort (a.u.) \n Baseline corrected')+
#     xlab('')+
#     scale_fill_manual(values=c("1" = pal[2],"-1"=pal[1]), guide="none") +
#     scale_color_manual( values=c("1" = pal[2],"-1"=pal[1]), guide="none") +
#     #scale_x_discrete(labels=c("CS+", "CS-")) +
#     theme_bw() + facet_wrap(~session, labeller=labeller(session =labels))



# Export the averaged figure and the Bayesian two-panel figure as Cairo PDFs.
# Each device is closed immediately after printing.
cairo_pdf('figures/Figure_HED_Lira.pdf')
print(plt)
dev.off()
## png 
##   2
cairo_pdf('figures/Figure_HED_Lira_Bayes.pdf')
print(figureHED)
dev.off()
## png 
##   2

This part is not run here, for two reasons: the time it takes, and it needs to be run on the cluster.

# Save the current directory before moving into the GLM folder.
# Fix: must be the command substitution $(pwd) — the original `cwd=pwd`
# assigned the literal string "pwd", so the later `cd $cwd` would fail.
cwd=$(pwd)
cd /home/OBIWAN/LIRA/fMRI/HED_GLM

fMRI group Analysis

# Save the caller's directory, run the AFNI group analysis, convert the
# outputs, then return. Requires bash (for |&), AFNI (3dAFNItoNIFTI) and
# FSL (fslmaths).
cwd=$(pwd)

cd /home/OBIWAN/DERIVATIVES/GLM/AFNI/hedonicreactivity/LIRA/ # go to fMRI directory


# Take a look at the HED.txt script for the analysis details
# |& pipes both stdout and stderr into tee (bash-only); trailing & backgrounds the job
nohup bash -x HED.txt |& tee diary.txt & # runs the AFNI group analysis script in the background and output logs as it goes in the "diary.txt" file


#Convert from AFNI format to NIFTI #-prefix test lme+tlrc[i]
# Sub-bricks 8, 10 and 12 of LME_8+tlrc are extracted (the _z suffix and the
# -ztop conversion below suggest they are z-statistics).
# NOTE(review): LME_8+tlrc[${i}] is unquoted — the [..] selector could be
# glob-expanded by the shell if a matching file exists; consider quoting.
for i in  8 10 12
  do 3dAFNItoNIFTI -prefix LME_8_con${i}_z LME_8+tlrc[${i}]
     fslmaths LME_8_con${i}_z -ztop -add -1 -mul -1 LME_8_con${i}_1-p # convert to pvalue then inverse it for display 
done

cd $cwd

Check out “analysis_LIRA_fMRI.R” for more details.

# mdl.hf = aov_car(data = inter, HF_inter ~ intervention + BMI_diff + age + Error(id), factorize = F, type = "II", anova_table = list(correction = "GG", es = "pes"))
# 
# res = nice(mdl.hf, MSE=F); ref_grid(mdl.hf)  #triple check everything is centered at 0
# 
# 
# #calculate Partial eta-squared and its 90 % CI for each effect
# PES.hf = pes_ci(data = inter, HF_inter ~ intervention + BMI_diff + age + Error(id), conf.level = .90, factorize = FALSE, anova.type = "II", epsilon="none") ;
# mdl.hf.emms = emmeans(mdl.hf, pairwise ~ intervention)

Contrast Post > Pre in HF

# res$p.value = as.numeric(res$p.value)
# res$p.value = ifelse(res$p.value < 0.05,paste("<span style=\" font-weight: bold; \" >" ,sprintf("%.3f",res$p.value), "</span>"),  paste("<span>" ,sprintf("%.3f",res$p.value), "</span>"))
# 
# res$F = unlist(str_split(gsub("[^0-9.,-]", "", res$F), ","));res$pes = unlist(str_split(gsub("[^0-9.,-]", "", res$pes), ","));
# res$`90% CI` = paste(sprintf("%.3f",PES.hf[,2]), "-", sprintf("%.3f",PES.hf[,3]))
# 
# res$p.value[1]= "<span style=\" font-weight: bold;    \" >\u003C 0.001</span>"
# #res$pes[c(5,7,8)]= c("\u003C 0.001")
# colnames(res)[3:5] = c( paste("F(", res$df[1], ")", sep=""),"&eta;<sub>p</sub><sup>2</sup>", "p")
# res[c(1,4,6,3,5)]  %>% kbl(digits = 2, escape = F,row.names = F)  %>%
#   kable_styling(latex_options = "striped", position = "center", full_width = F)
# #print('PES: intervention: Overall higher weight loss for treament (Liraglutide) group')
# #PES.weight[1,]
# mod = lm(HF_inter ~ intervention + BMI_diff + age, data = inter)
# x = plot_model(mod, type = "diag")
# x[c(2,3,4,1)]
 #check assumptions



Packages

# Auto-generated citation list for every package loaded in this session
report::report_packages()
##   - repro (version 0.1.0; Aaron Peikert, Andreas Brandmaier and Caspar van Lissa, 2021)
##   - ggpubr (version 0.4.0; Alboukadel Kassambara, 2020)
##   - papaja (version 0.1.0.9997; Aust, Barth, 2020)
##   - tinylabels (version 0.2.1; Barth, 2021)
##   - cowplot (version 1.1.1; Claus Wilke, 2020)
##   - apaTables (version 2.0.8; David Stanley, 2021)
##   - Rcpp (version 1.0.6; Dirk Eddelbuettel and Romain Francois, 2011)
##   - Matrix (version 1.3.2; Douglas Bates and Martin Maechler, 2021)
##   - lme4 (version 1.1.27.1; Douglas Bates et al., 2015)
##   - XML (version 3.99.0.6; Duncan Temple Lang, 2021)
##   - intmed (version 0.1.2; Gary Chan, 2020)
##   - pander (version 0.6.4; Gergely Daróczi and Roman Tsegelskyi, 2021)
##   - ggplot2 (version 3.3.4; Wickham. ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York, 2016.)
##   - plyr (version 1.8.6; Hadley Wickham, 2011)
##   - stringr (version 1.4.0; Hadley Wickham, 2019)
##   - tidyr (version 1.1.3; Hadley Wickham, 2021)
##   - usethis (version 2.0.1; Hadley Wickham and Jennifer Bryan, 2021)
##   - devtools (version 2.4.2; Hadley Wickham, Jim Hester and Winston Chang, 2021)
##   - dplyr (version 1.0.7; Hadley Wickham et al., 2021)
##   - kableExtra (version 1.3.4; Hao Zhu, 2021)
##   - afex (version 0.28.1; Henrik Singmann et al., 2021)
##   - ggthemes (version 4.2.4; Jeffrey Arnold, 2021)
##   - glmnet (version 4.1.2; Jerome Friedman et al., 2010)
##   - optimx (version 2021.6.12; John Nash, Ravi Varadhan, 2011)
##   - cmdstanr (version 0.4.0.9000; Jonah Gabry and Rok Češnovar, 2021)
##   - JWileymisc (version 1.2.0; Joshua Wiley, 2020)
##   - tidybayes (version 2.3.1; Kay M, 2020)
##   - MBESS (version 4.8.0; Ken Kelley, 2020)
##   - tibble (version 3.1.2; Kirill Müller and Hadley Wickham, 2021)
##   - rlist (version 0.4.6.1; Kun Ren, 2016)
##   - sjPlot (version 2.8.8; Lüdecke D, 2021)
##   - bayestestR (version 0.10.0; Makowski et al., 2019)
##   - coda (version 0.19.4; Martyn Plummer et al., 2006)
##   - caret (version 6.0.88; Max Kuhn, 2021)
##   - lspline (version 1.0.0; Michal Bojanowski, 2017)
##   - brms (version 2.15.0; Paul-Christian Bürkner, 2017)
##   - R (version 4.0.4; R Core Team, 2021)
##   - psych (version 2.1.6; Revelle, 2021)
##   - BayesFactor (version 0.9.12.4.2; Richard Morey and Jeffrey Rouder, 2018)
##   - emmeans (version 1.6.1; Russell Lenth, 2021)
##   - Rmisc (version 1.5; Ryan Hope, 2013)
##   - janitor (version 2.1.0; Sam Firke, 2021)
##   - lattice (version 0.20.41; Sarkar, Deepayan, 2008)
##   - corrplot (version 0.89; Taiyun Wei and Viliam Simko, 2021)
##   - knitr (version 1.33; Yihui Xie, 2021)
# aaronpeikert/repro@devel #repro, crsh/papaja@devel,knitr, afex #aov_car/lmer, emmeans #emmeans, parallel #detectCores, tidyverse ##ggplot/dplyr/plyr/tidyr, car #densityPlot, lspline # lspline, JWileymisc #egtable, kableExtra #kable_styling, glmnet #cv.glmnet, ggthemes #theme_fivethirtyeight,  MBESS #pes_ci, sjPlot #tab_model/plot_model, Rmisc #SummarySEwithin, janitor #row_to_names, rlist #list.clean, sessioninfo #sessioninfo, stringr #str_list, XML #readHTMLtable, bayestestR #bayesfactor_models, cmdstanr #multithead, ggpubr::ggarrange #   Arrange Multiple ggplots, tidybayes #spread draws]